From 1bd7cb03a3c23284e222ca15cf942cac3894abdd Mon Sep 17 00:00:00 2001 From: Joel Takvorian Date: Mon, 18 Sep 2023 15:56:43 +0200 Subject: [PATCH] Bump dependencies (kube client, prom model...) (#497) --- go.mod | 67 +- go.sum | 252 +- pkg/test/e2e/utils.go | 5 +- .../github.com/PuerkitoBio/purell/.gitignore | 5 - .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../github.com/PuerkitoBio/purell/README.md | 188 - .../github.com/PuerkitoBio/purell/purell.go | 379 - .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 - .../github.com/emicklei/go-restful/Makefile | 5 - .../emicklei/go-restful/{ => v3}/.gitignore | 0 .../emicklei/go-restful/v3/.goconvey | 1 + .../emicklei/go-restful/{ => v3}/.travis.yml | 0 .../emicklei/go-restful/{ => v3}/CHANGES.md | 212 +- .../emicklei/go-restful/{ => v3}/LICENSE | 0 .../emicklei/go-restful/v3/Makefile | 8 + .../emicklei/go-restful/{ => v3}/README.md | 34 +- .../emicklei/go-restful/v3/SECURITY.md | 13 + .../emicklei/go-restful/{ => v3}/Srcfile | 0 .../go-restful/{ => v3}/bench_test.sh | 0 .../emicklei/go-restful/{ => v3}/compress.go | 6 +- .../go-restful/{ => v3}/compressor_cache.go | 0 .../go-restful/{ => v3}/compressor_pools.go | 0 .../go-restful/{ => v3}/compressors.go | 0 .../emicklei/go-restful/{ => v3}/constants.go | 2 + .../emicklei/go-restful/{ => v3}/container.go | 15 +- .../go-restful/{ => v3}/cors_filter.go | 0 .../emicklei/go-restful/{ => v3}/coverage.sh | 0 .../emicklei/go-restful/{ => v3}/curly.go | 0 .../go-restful/{ => v3}/curly_route.go | 0 .../go-restful/{ => v3}/custom_verb.go | 0 .../emicklei/go-restful/{ => v3}/doc.go | 6 +- .../go-restful/{ => v3}/entity_accessors.go | 0 .../emicklei/go-restful/v3/extensions.go | 21 + .../emicklei/go-restful/{ => v3}/filter.go | 8 +- .../emicklei/go-restful/v3/filter_adapter.go | 21 + .../emicklei/go-restful/{ => v3}/json.go | 0 .../emicklei/go-restful/{ => v3}/jsoniter.go | 0 .../emicklei/go-restful/{ => v3}/jsr311.go | 10 + .../emicklei/go-restful/{ => v3}/log/log.go | 0 .../emicklei/go-restful/{ => v3}/logger.go | 2 +- .../emicklei/go-restful/{ => v3}/mime.go | 0 .../go-restful/{ => v3}/options_filter.go | 0 .../emicklei/go-restful/{ => v3}/parameter.go | 101 +- .../go-restful/{ => v3}/path_expression.go | 0 .../go-restful/{ => v3}/path_processor.go | 0 .../emicklei/go-restful/{ => v3}/request.go | 25 +- .../emicklei/go-restful/{ => v3}/response.go | 3 + .../emicklei/go-restful/{ => v3}/route.go | 9 +- .../go-restful/{ => v3}/route_builder.go | 50 +- .../go-restful/{ => v3}/route_reader.go | 0 .../emicklei/go-restful/{ => v3}/router.go | 0 .../go-restful/{ => v3}/service_error.go | 0 .../go-restful/{ => v3}/web_service.go | 32 +- .../{ => v3}/web_service_container.go | 0 .../github.com/evanphx/json-patch/.gitignore | 6 - .../github.com/evanphx/json-patch/README.md | 317 - .../evanphx/json-patch/{ => v5}/LICENSE | 0 .../evanphx/json-patch/{ => v5}/errors.go | 0 .../evanphx/json-patch/{ => v5}/merge.go | 71 +- .../evanphx/json-patch/{ => v5}/patch.go | 444 +- .../github.com/fsnotify/fsnotify/.gitignore | 10 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 133 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 89 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 211 +- .../fsnotify/fsnotify/backend_fen.go | 162 + 
.../fsnotify/fsnotify/backend_inotify.go | 459 + .../fsnotify/fsnotify/backend_kqueue.go | 707 + .../fsnotify/fsnotify/backend_other.go | 66 + .../fsnotify/fsnotify/backend_windows.go | 746 ++ vendor/github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 80 +- .../github.com/fsnotify/fsnotify/inotify.go | 338 - .../fsnotify/fsnotify/inotify_poller.go | 188 - vendor/github.com/fsnotify/fsnotify/kqueue.go | 522 - vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 + .../{open_mode_bsd.go => system_bsd.go} | 4 - .../{open_mode_darwin.go => system_darwin.go} | 4 - .../github.com/fsnotify/fsnotify/windows.go | 562 - vendor/github.com/go-kit/log/README.md | 5 + vendor/github.com/go-kit/log/json_logger.go | 4 +- vendor/github.com/go-kit/log/level/doc.go | 11 + vendor/github.com/go-kit/log/level/level.go | 53 +- vendor/github.com/go-kit/log/staticcheck.conf | 1 + vendor/github.com/go-logr/logr/.golangci.yaml | 3 - vendor/github.com/go-logr/logr/discard.go | 32 +- vendor/github.com/go-logr/logr/logr.go | 166 +- .../go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonreference/.golangci.yml | 13 +- .../go-openapi/jsonreference/.travis.yml | 24 - .../jsonreference/internal/normalize_url.go | 69 + .../go-openapi/jsonreference/reference.go | 6 +- .../github.com/go-openapi/swag/.gitattributes | 2 + .../github.com/go-openapi/swag/.golangci.yml | 15 + vendor/github.com/go-openapi/swag/.travis.yml | 37 - vendor/github.com/go-openapi/swag/doc.go | 15 +- vendor/github.com/go-openapi/swag/file.go | 33 + vendor/github.com/go-openapi/swag/loading.go | 11 +- .../github.com/go-openapi/swag/post_go18.go | 1 + .../github.com/go-openapi/swag/post_go19.go | 1 + vendor/github.com/go-openapi/swag/pre_go18.go | 1 + vendor/github.com/go-openapi/swag/pre_go19.go | 1 + vendor/github.com/go-openapi/swag/util.go | 17 +- vendor/github.com/go-openapi/swag/yaml.go | 252 +- .../{gnostic => gnostic-models}/LICENSE | 0 .../compiler/README.md | 0 .../compiler/context.go | 0 .../compiler/error.go | 0 .../compiler/extensions.go | 2 +- .../compiler/helpers.go | 2 +- .../compiler/main.go | 0 .../compiler/reader.go | 0 .../extensions/README.md | 0 .../extensions/extension.pb.go | 4 +- .../extensions/extension.proto | 0 .../extensions/extensions.go | 0 .../jsonschema/README.md | 0 .../jsonschema/base.go | 15 +- .../jsonschema/display.go | 0 .../jsonschema/models.go | 0 .../jsonschema/operations.go | 0 .../jsonschema/reader.go | 0 .../jsonschema/schema.json | 0 .../jsonschema/writer.go | 0 .../openapiv2/OpenAPIv2.go | 2 +- .../openapiv2/OpenAPIv2.pb.go | 4 +- .../openapiv2/OpenAPIv2.proto | 0 .../openapiv2/README.md | 0 .../openapiv2/document.go | 2 +- .../openapiv2/openapi-2.0.json | 0 .../openapiv3/OpenAPIv3.go | 2 +- .../openapiv3/OpenAPIv3.pb.go | 4 +- .../openapiv3/OpenAPIv3.proto | 0 .../openapiv3/README.md | 0 .../openapiv3/document.go | 2 +- .../google/gnostic/openapiv3/openapi-3.0.json | 1251 -- .../google/gnostic/openapiv3/openapi-3.1.json | 1250 -- vendor/github.com/google/gofuzz/.travis.yml | 11 +- .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +- vendor/github.com/google/gofuzz/README.md | 18 + .../google/gofuzz/bytesource/bytesource.go | 81 + vendor/github.com/google/gofuzz/fuzz.go | 137 +- .../github.com/imdario/mergo/CONTRIBUTING.md | 112 + vendor/github.com/imdario/mergo/README.md | 35 +- vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/map.go | 6 +- vendor/github.com/imdario/mergo/merge.go | 61 +- vendor/github.com/imdario/mergo/mergo.go | 15 
+- .../mailru/easyjson/jlexer/lexer.go | 14 + .../moby/spdystream/CONTRIBUTING.md | 13 + vendor/github.com/moby/spdystream/LICENSE | 202 + vendor/github.com/moby/spdystream/MAINTAINERS | 40 + vendor/github.com/moby/spdystream/NOTICE | 5 + vendor/github.com/moby/spdystream/README.md | 77 + .../github.com/moby/spdystream/connection.go | 972 ++ vendor/github.com/moby/spdystream/handlers.go | 52 + vendor/github.com/moby/spdystream/priority.go | 114 + .../moby/spdystream/spdy/dictionary.go | 203 + .../github.com/moby/spdystream/spdy/read.go | 364 + .../github.com/moby/spdystream/spdy/types.go | 291 + .../github.com/moby/spdystream/spdy/write.go | 334 + vendor/github.com/moby/spdystream/stream.go | 343 + vendor/github.com/moby/spdystream/utils.go | 32 + .../client_golang/prometheus/collector.go | 6 +- .../client_golang/prometheus/counter.go | 39 +- .../client_golang/prometheus/desc.go | 47 +- .../client_golang/prometheus/doc.go | 107 +- .../client_golang/prometheus/gauge.go | 32 +- .../client_golang/prometheus/get_pid.go | 26 + .../prometheus/get_pid_gopherjs.go | 23 + .../client_golang/prometheus/go_collector.go | 20 +- .../prometheus/go_collector_go116.go | 17 +- ...lector_go117.go => go_collector_latest.go} | 315 +- .../client_golang/prometheus/histogram.go | 1011 +- .../prometheus/internal/almost_equal.go | 60 + .../prometheus/internal/difflib.go | 654 + .../internal/go_collector_options.go | 32 + .../prometheus/internal/go_runtime_metrics.go | 18 +- .../prometheus/internal/metric.go | 28 +- .../client_golang/prometheus/labels.go | 81 +- .../client_golang/prometheus/metric.go | 118 +- .../client_golang/prometheus/num_threads.go | 25 + .../prometheus/num_threads_gopherjs.go | 22 + .../client_golang/prometheus/observer.go | 2 +- .../prometheus/process_collector.go | 10 +- .../prometheus/process_collector_js.go | 26 + .../prometheus/process_collector_other.go | 4 +- .../prometheus/promhttp/delegator.go | 18 +- .../client_golang/prometheus/promhttp/http.go | 20 +- .../prometheus/promhttp/instrument_client.go | 46 +- .../prometheus/promhttp/instrument_server.go | 140 +- .../prometheus/promhttp/option.go | 65 +- .../client_golang/prometheus/registry.go | 169 +- .../client_golang/prometheus/summary.go | 48 +- .../client_golang/prometheus/timer.go | 39 +- .../client_golang/prometheus/value.go | 55 +- .../client_golang/prometheus/vec.go | 140 +- .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 8 +- .../prometheus/client_model/go/metrics.pb.go | 1533 ++- .../prometheus/common/config/config.go | 26 +- .../prometheus/common/config/http_config.go | 549 +- .../prometheus/common/expfmt/decode.go | 39 +- .../prometheus/common/expfmt/encode.go | 13 +- .../prometheus/common/expfmt/expfmt.go | 26 +- .../prometheus/common/expfmt/fuzz.go | 5 +- .../common/expfmt/openmetrics_create.go | 26 +- .../prometheus/common/expfmt/text_create.go | 3 +- .../prometheus/common/expfmt/text_parse.go | 12 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +- .../prometheus/common/model/time.go | 91 +- .../prometheus/common/model/value.go | 246 +- .../prometheus/common/model/value_float.go | 100 + .../common/model/value_histogram.go | 178 + .../prometheus/common/model/value_type.go | 83 + .../prometheus/common/version/info.go | 21 +- .../prometheus/common/version/info_default.go | 25 + .../prometheus/common/version/info_go118.go | 67 + .../github.com/prometheus/procfs/.gitignore | 3 +- .../prometheus/procfs/.golangci.yml | 10 +- .../prometheus/procfs/CODE_OF_CONDUCT.md | 4 +- 
.../prometheus/procfs/CONTRIBUTING.md | 4 +- vendor/github.com/prometheus/procfs/Makefile | 10 +- .../prometheus/procfs/Makefile.common | 92 +- .../github.com/prometheus/procfs/SECURITY.md | 2 +- vendor/github.com/prometheus/procfs/arp.go | 45 +- .../github.com/prometheus/procfs/cpuinfo.go | 41 +- .../prometheus/procfs/cpuinfo_armx.go | 1 + .../prometheus/procfs/cpuinfo_loong64.go | 19 + .../prometheus/procfs/cpuinfo_mipsx.go | 1 + .../prometheus/procfs/cpuinfo_others.go | 4 +- .../prometheus/procfs/cpuinfo_ppcx.go | 1 + .../prometheus/procfs/cpuinfo_riscvx.go | 1 + .../prometheus/procfs/cpuinfo_s390x.go | 1 + .../prometheus/procfs/cpuinfo_x86.go | 1 + vendor/github.com/prometheus/procfs/doc.go | 51 +- .../prometheus/procfs/fixtures.ttar | 7673 ----------- .../prometheus/procfs/internal/fs/fs.go | 2 +- .../prometheus/procfs/internal/util/parse.go | 6 +- .../procfs/internal/util/readfile.go | 11 +- .../procfs/internal/util/sysreadfile.go | 8 +- .../internal/util/sysreadfile_compat.go | 3 +- vendor/github.com/prometheus/procfs/ipvs.go | 3 +- .../prometheus/procfs/kernel_random.go | 1 + .../github.com/prometheus/procfs/loadavg.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 10 +- .../prometheus/procfs/mountstats.go | 3 +- .../prometheus/procfs/net_conntrackstat.go | 12 +- .../github.com/prometheus/procfs/net_dev.go | 8 +- .../prometheus/procfs/net_ip_socket.go | 2 +- .../prometheus/procfs/net_protocols.go | 4 +- .../prometheus/procfs/net_softnet.go | 78 +- .../procfs/{xfrm.go => net_xfrm.go} | 9 +- .../github.com/prometheus/procfs/netstat.go | 59 +- vendor/github.com/prometheus/procfs/proc.go | 10 +- .../prometheus/procfs/proc_cgroup.go | 8 +- .../prometheus/procfs/proc_cgroups.go | 98 + .../prometheus/procfs/proc_environ.go | 2 +- .../prometheus/procfs/proc_fdinfo.go | 3 +- .../prometheus/procfs/proc_interrupts.go | 98 + .../prometheus/procfs/proc_limits.go | 2 +- .../github.com/prometheus/procfs/proc_maps.go | 12 +- .../prometheus/procfs/proc_netstat.go | 443 + .../github.com/prometheus/procfs/proc_psi.go | 14 +- .../prometheus/procfs/proc_smaps.go | 23 +- .../github.com/prometheus/procfs/proc_snmp.go | 353 + .../prometheus/procfs/proc_snmp6.go | 381 + .../github.com/prometheus/procfs/proc_stat.go | 15 +- .../prometheus/procfs/proc_status.go | 38 +- .../github.com/prometheus/procfs/proc_sys.go | 51 + .../github.com/prometheus/procfs/schedstat.go | 6 +- vendor/github.com/prometheus/procfs/slab.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 160 + vendor/github.com/prometheus/procfs/stat.go | 32 +- vendor/github.com/prometheus/procfs/thread.go | 79 + vendor/github.com/prometheus/procfs/vm.go | 10 +- .../github.com/prometheus/procfs/zoneinfo.go | 5 +- .../stretchr/testify/assert/assertions.go | 78 +- .../github.com/stretchr/testify/mock/mock.go | 12 +- .../github.com/vladimirvivien/gexe/README.md | 18 +- vendor/github.com/vladimirvivien/gexe/TODO.md | 17 +- .../vladimirvivien/gexe/exec/builder.go | 255 +- .../vladimirvivien/gexe/exec/cmd_parser.go | 8 +- .../vladimirvivien/gexe/exec/proc.go | 188 +- .../vladimirvivien/gexe/functions.go | 56 +- vendor/github.com/vladimirvivien/gexe/http.go | 13 + .../vladimirvivien/gexe/http/http_reader.go | 84 + .../vladimirvivien/gexe/http/http_response.go | 26 + .../vladimirvivien/gexe/http/http_writer.go | 98 + .../github.com/vladimirvivien/gexe/procs.go | 62 +- .../vladimirvivien/gexe/variables.go | 4 +- .../golang.org/x/net/http/httpproxy/proxy.go | 370 + .../golang.org/x/net/internal/socks/client.go | 168 + 
.../golang.org/x/net/internal/socks/socks.go | 317 + vendor/golang.org/x/net/proxy/dial.go | 54 + vendor/golang.org/x/net/proxy/direct.go | 31 + vendor/golang.org/x/net/proxy/per_host.go | 155 + vendor/golang.org/x/net/proxy/proxy.go | 149 + vendor/golang.org/x/net/proxy/socks5.go | 42 + vendor/golang.org/x/text/width/kind_string.go | 28 - .../golang.org/x/text/width/tables10.0.0.go | 1329 -- .../golang.org/x/text/width/tables11.0.0.go | 1341 -- .../golang.org/x/text/width/tables12.0.0.go | 1361 -- .../golang.org/x/text/width/tables13.0.0.go | 1362 -- .../golang.org/x/text/width/tables15.0.0.go | 1368 -- vendor/golang.org/x/text/width/tables9.0.0.go | 1297 -- vendor/golang.org/x/text/width/transform.go | 239 - vendor/golang.org/x/text/width/trieval.go | 30 - vendor/golang.org/x/text/width/width.go | 206 - vendor/golang.org/x/time/rate/rate.go | 123 +- vendor/golang.org/x/time/rate/sometimes.go | 67 + .../admissionregistration/v1/generated.pb.go | 484 +- .../admissionregistration/v1/generated.proto | 77 + .../api/admissionregistration/v1/types.go | 77 + .../v1/types_swagger_doc_generated.go | 14 +- .../v1/zz_generated.deepcopy.go | 26 + .../api/admissionregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 4634 +++++++ .../v1alpha1/generated.proto | 609 + .../v1alpha1/register.go | 56 + .../admissionregistration/v1alpha1/types.go | 665 + .../v1alpha1/types_swagger_doc_generated.go | 204 + .../v1alpha1/zz_generated.deepcopy.go | 475 + .../v1beta1/generated.pb.go | 6119 +++++++-- .../v1beta1/generated.proto | 669 +- .../admissionregistration/v1beta1/register.go | 4 + .../admissionregistration/v1beta1/types.go | 737 +- .../v1beta1/types_swagger_doc_generated.go | 193 +- .../v1beta1/zz_generated.deepcopy.go | 463 +- .../zz_generated.prerelease-lifecycle.go | 72 + vendor/k8s.io/api/apidiscovery/v2beta1/doc.go | 24 + .../api/apidiscovery/v2beta1/generated.pb.go | 1744 +++ .../api/apidiscovery/v2beta1/generated.proto | 156 + .../api/apidiscovery/v2beta1/register.go | 56 + .../k8s.io/api/apidiscovery/v2beta1/types.go | 163 + .../v2beta1/zz_generated.deepcopy.go | 190 + .../zz_generated.prerelease-lifecycle.go | 58 + .../v1alpha1/generated.pb.go | 148 +- .../v1alpha1/generated.proto | 7 +- .../api/apiserverinternal/v1alpha1/types.go | 7 +- .../v1alpha1/types_swagger_doc_generated.go | 5 +- .../v1alpha1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/apps/v1/generated.pb.go | 481 +- vendor/k8s.io/api/apps/v1/generated.proto | 40 +- vendor/k8s.io/api/apps/v1/types.go | 43 +- .../apps/v1/types_swagger_doc_generated.go | 28 +- .../api/apps/v1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 459 +- .../k8s.io/api/apps/v1beta1/generated.proto | 89 +- vendor/k8s.io/api/apps/v1beta1/types.go | 89 +- .../v1beta1/types_swagger_doc_generated.go | 72 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 498 +- .../k8s.io/api/apps/v1beta2/generated.proto | 42 +- vendor/k8s.io/api/apps/v1beta2/types.go | 42 +- .../v1beta2/types_swagger_doc_generated.go | 30 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 21 + .../api/authentication/v1/generated.pb.go | 511 +- .../api/authentication/v1/generated.proto | 22 +- .../k8s.io/api/authentication/v1/register.go | 1 + vendor/k8s.io/api/authentication/v1/types.go | 27 +- .../v1/types_swagger_doc_generated.go | 23 +- .../v1/zz_generated.deepcopy.go | 44 + .../k8s.io/api/authentication/v1alpha1/doc.go | 23 + .../authentication/v1alpha1/generated.pb.go | 567 + 
.../authentication/v1alpha1/generated.proto | 51 + .../api/authentication/v1alpha1/register.go | 51 + .../api/authentication/v1alpha1/types.go | 48 + .../v1alpha1/types_swagger_doc_generated.go | 49 + .../v1alpha1/zz_generated.deepcopy.go | 70 + .../zz_generated.prerelease-lifecycle.go | 40 + .../authentication/v1beta1/generated.pb.go | 476 +- .../authentication/v1beta1/generated.proto | 21 + .../api/authentication/v1beta1/register.go | 1 + .../api/authentication/v1beta1/types.go | 27 + .../v1beta1/types_swagger_doc_generated.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 44 + .../zz_generated.prerelease-lifecycle.go | 18 + .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 42 +- vendor/k8s.io/api/autoscaling/v1/types.go | 79 +- .../v1/types_swagger_doc_generated.go | 42 +- .../k8s.io/api/autoscaling/v2/generated.proto | 20 +- vendor/k8s.io/api/autoscaling/v2/types.go | 61 +- .../v2/types_swagger_doc_generated.go | 22 +- .../api/autoscaling/v2beta1/generated.proto | 4 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 4 +- .../v2beta1/types_swagger_doc_generated.go | 6 +- .../api/autoscaling/v2beta2/generated.proto | 24 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 62 +- .../v2beta2/types_swagger_doc_generated.go | 26 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 1424 +- vendor/k8s.io/api/batch/v1/generated.proto | 202 +- vendor/k8s.io/api/batch/v1/types.go | 287 +- .../batch/v1/types_swagger_doc_generated.go | 69 +- .../api/batch/v1/zz_generated.deepcopy.go | 121 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 317 +- .../k8s.io/api/batch/v1beta1/generated.proto | 26 +- vendor/k8s.io/api/batch/v1beta1/register.go | 1 - vendor/k8s.io/api/batch/v1beta1/types.go | 31 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 27 - .../zz_generated.prerelease-lifecycle.go | 18 - vendor/k8s.io/api/certificates/v1/types.go | 7 +- .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/certificates/v1alpha1/doc.go | 24 + .../api/certificates/v1alpha1/generated.pb.go | 831 ++ .../api/certificates/v1alpha1/generated.proto | 103 + .../api/certificates/v1alpha1/register.go | 61 + .../k8s.io/api/certificates/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 60 + .../v1alpha1/zz_generated.deepcopy.go | 102 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 12 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 27 +- vendor/k8s.io/api/core/v1/generated.pb.go | 5955 ++++++--- vendor/k8s.io/api/core/v1/generated.proto | 617 +- vendor/k8s.io/api/core/v1/toleration.go | 7 +- vendor/k8s.io/api/core/v1/types.go | 793 +- .../core/v1/types_swagger_doc_generated.go | 224 +- .../k8s.io/api/core/v1/well_known_labels.go | 4 + .../api/core/v1/zz_generated.deepcopy.go | 262 +- .../k8s.io/api/discovery/v1/generated.proto | 37 +- vendor/k8s.io/api/discovery/v1/types.go | 53 +- .../v1/types_swagger_doc_generated.go | 20 +- .../api/discovery/v1beta1/generated.proto | 22 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 39 +- 
.../v1beta1/types_swagger_doc_generated.go | 18 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 11064 ++++++---------- .../api/extensions/v1beta1/generated.proto | 365 +- .../k8s.io/api/extensions/v1beta1/register.go | 2 - vendor/k8s.io/api/extensions/v1beta1/types.go | 494 +- .../v1beta1/types_swagger_doc_generated.go | 215 +- .../v1beta1/zz_generated.deepcopy.go | 457 +- .../zz_generated.prerelease-lifecycle.go | 48 - .../api/flowcontrol/v1alpha1/generated.pb.go | 532 +- .../api/flowcontrol/v1alpha1/generated.proto | 75 +- .../k8s.io/api/flowcontrol/v1alpha1/types.go | 86 +- .../v1alpha1/types_swagger_doc_generated.go | 17 +- .../v1alpha1/zz_generated.deepcopy.go | 41 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/flowcontrol/v1beta1/generated.pb.go | 530 +- .../api/flowcontrol/v1beta1/generated.proto | 75 +- .../k8s.io/api/flowcontrol/v1beta1/types.go | 90 +- .../v1beta1/types_swagger_doc_generated.go | 17 +- .../v1beta1/zz_generated.deepcopy.go | 41 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/flowcontrol/v1beta2/generated.pb.go | 531 +- .../api/flowcontrol/v1beta2/generated.proto | 75 +- .../k8s.io/api/flowcontrol/v1beta2/types.go | 86 +- .../v1beta2/types_swagger_doc_generated.go | 17 +- .../v1beta2/zz_generated.deepcopy.go | 41 + .../zz_generated.prerelease-lifecycle.go | 28 + vendor/k8s.io/api/flowcontrol/v1beta3/doc.go | 25 + .../api/flowcontrol/v1beta3/generated.pb.go | 5663 ++++++++ .../api/flowcontrol/v1beta3/generated.proto | 515 + .../api/flowcontrol/v1beta3/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta3/types.go | 659 + .../v1beta3/types_swagger_doc_generated.go | 274 + .../v1beta3/zz_generated.deepcopy.go | 583 + .../zz_generated.prerelease-lifecycle.go | 94 + .../k8s.io/api/networking/v1/generated.pb.go | 1316 +- .../k8s.io/api/networking/v1/generated.proto | 248 +- vendor/k8s.io/api/networking/v1/types.go | 295 +- .../v1/types_swagger_doc_generated.go | 143 +- .../networking/v1/zz_generated.deepcopy.go | 91 +- vendor/k8s.io/api/networking/v1alpha1/doc.go | 23 + .../api/networking/v1alpha1/generated.pb.go | 1858 +++ .../api/networking/v1alpha1/generated.proto | 155 + .../api/networking/v1alpha1/register.go | 62 + .../k8s.io/api/networking/v1alpha1/types.go | 163 + .../v1alpha1/types_swagger_doc_generated.go | 104 + .../networking/v1alpha1/well_known_labels.go | 33 + .../v1alpha1/zz_generated.deepcopy.go | 205 + .../zz_generated.prerelease-lifecycle.go | 94 + .../api/networking/v1beta1/generated.pb.go | 830 +- .../api/networking/v1beta1/generated.proto | 113 +- vendor/k8s.io/api/networking/v1beta1/types.go | 124 +- .../v1beta1/types_swagger_doc_generated.go | 89 +- .../v1beta1/zz_generated.deepcopy.go | 67 + vendor/k8s.io/api/node/v1/generated.proto | 10 +- vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- vendor/k8s.io/api/policy/v1/generated.pb.go | 150 +- vendor/k8s.io/api/policy/v1/generated.proto | 28 + vendor/k8s.io/api/policy/v1/types.go | 48 + .../policy/v1/types_swagger_doc_generated.go | 11 +- .../api/policy/v1/zz_generated.deepcopy.go | 5 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 
287 +- .../k8s.io/api/policy/v1beta1/generated.proto | 29 +- vendor/k8s.io/api/policy/v1beta1/types.go | 49 +- .../v1beta1/types_swagger_doc_generated.go | 13 +- .../policy/v1beta1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/rbac/v1/generated.proto | 4 +- vendor/k8s.io/api/rbac/v1/types.go | 4 +- .../rbac/v1/types_swagger_doc_generated.go | 8 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 2 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- vendor/k8s.io/api/resource/v1alpha2/doc.go | 24 + .../api/resource/v1alpha2/generated.pb.go | 4817 +++++++ .../api/resource/v1alpha2/generated.proto | 400 + .../k8s.io/api/resource/v1alpha2/register.go | 63 + vendor/k8s.io/api/resource/v1alpha2/types.go | 462 + .../v1alpha2/types_swagger_doc_generated.go | 232 + .../v1alpha2/zz_generated.deepcopy.go | 498 + .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 246 +- vendor/k8s.io/api/storage/v1/generated.proto | 145 +- vendor/k8s.io/api/storage/v1/types.go | 151 +- .../storage/v1/types_swagger_doc_generated.go | 81 +- .../api/storage/v1/zz_generated.deepcopy.go | 5 + .../api/storage/v1alpha1/generated.proto | 38 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 41 +- .../v1alpha1/types_swagger_doc_generated.go | 38 +- .../api/storage/v1beta1/generated.pb.go | 246 +- .../api/storage/v1beta1/generated.proto | 131 +- vendor/k8s.io/api/storage/v1beta1/types.go | 138 +- .../v1beta1/types_swagger_doc_generated.go | 77 +- .../storage/v1beta1/zz_generated.deepcopy.go | 5 + .../apimachinery/pkg/api/equality/semantic.go | 49 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 - .../apimachinery/pkg/api/errors/errors.go | 30 +- .../apimachinery/pkg/api/meta/conditions.go | 6 +- .../apimachinery/pkg/api/meta/errors.go | 35 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 89 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 5 +- .../apimachinery/pkg/api/resource/OWNERS | 1 - .../pkg/api/resource/generated.proto | 27 +- .../apimachinery/pkg/api/resource/quantity.go | 37 +- .../apimachinery/pkg/api/resource/suffix.go | 2 +- .../apimachinery/pkg/api/validation/doc.go | 18 + .../pkg/api/validation/generic.go | 88 + .../pkg/api/validation/objectmeta.go | 265 + .../pkg/apis/meta/internalversion/defaults.go | 38 + .../pkg/apis/meta/internalversion/types.go | 25 + .../zz_generated.conversion.go | 2 + .../internalversion/zz_generated.deepcopy.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 437 +- .../pkg/apis/meta/v1/generated.proto | 101 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 2 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 6 - .../pkg/apis/meta/v1/micro_time_proto.go | 10 +- .../apimachinery/pkg/apis/meta/v1/types.go | 132 +- .../meta/v1/types_swagger_doc_generated.go | 26 +- .../apis/meta/v1/unstructured/unstructured.go | 17 +- .../meta/v1/unstructured/unstructured_list.go | 9 + .../pkg/apis/meta/v1/validation/validation.go | 320 + 
.../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../apimachinery/pkg/conversion/converter.go | 8 +- .../apimachinery/pkg/conversion/deep_equal.go | 11 + .../apimachinery/pkg/conversion/helper.go | 2 +- .../pkg/conversion/queryparams/convert.go | 4 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 146 +- .../apimachinery/pkg/runtime/allocator.go | 14 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 14 +- .../apimachinery/pkg/runtime/codec_check.go | 2 +- .../apimachinery/pkg/runtime/converter.go | 20 +- .../apimachinery/pkg/runtime/generated.proto | 59 +- .../apimachinery/pkg/runtime/interfaces.go | 5 + .../pkg/runtime/schema/group_version.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 8 +- .../pkg/runtime/serializer/codec_factory.go | 2 +- .../serializer/versioning/versioning.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/splice.go | 76 + .../k8s.io/apimachinery/pkg/runtime/types.go | 61 +- .../apimachinery/pkg/types/namespacedname.go | 11 + .../k8s.io/apimachinery/pkg/types/nodename.go | 24 +- .../apimachinery/pkg/util/cache/expiring.go | 12 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 39 +- .../k8s.io/apimachinery/pkg/util/dump/dump.go | 54 + .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 10 +- .../apimachinery/pkg/util/httpstream/doc.go | 19 + .../pkg/util/httpstream/httpstream.go | 159 + .../pkg/util/httpstream/spdy/connection.go | 204 + .../pkg/util/httpstream/spdy/roundtripper.go | 370 + .../pkg/util/httpstream/spdy/upgrade.go | 120 + .../apimachinery/pkg/util/intstr/intstr.go | 9 +- .../pkg/util/managedfields/endpoints.yaml | 7018 ++++++++++ .../pkg/util/managedfields/extract.go | 2 +- .../pkg/util/managedfields/fieldmanager.go | 57 + .../managedfields/internal/atmostevery.go | 60 + .../internal/buildmanagerinfo.go | 74 + .../managedfields/internal/capmanagers.go | 133 + .../util/managedfields/internal/conflict.go | 89 + .../managedfields/internal/fieldmanager.go | 209 + .../pkg/util/managedfields/internal/fields.go | 47 + .../managedfields/internal/lastapplied.go | 50 + .../internal/lastappliedmanager.go | 171 + .../internal/lastappliedupdater.go | 102 + .../managedfields/internal/managedfields.go | 248 + .../internal/managedfieldsupdater.go | 82 + .../util/managedfields/internal/manager.go | 52 + .../managedfields/internal/pathelement.go | 140 + .../managedfields/internal/skipnonapplied.go | 89 + .../util/managedfields/internal/stripmeta.go | 90 + .../managedfields/internal/structuredmerge.go | 186 + .../managedfields/internal/typeconverter.go | 193 + .../managedfields/internal/versioncheck.go | 52 + .../internal/versionconverter.go | 123 + .../pkg/util/managedfields/node.yaml | 261 + .../pkg/util/managedfields/pod.yaml | 121 + .../pkg/util/managedfields/scalehandler.go | 174 + .../pkg/util/managedfields/typeconverter.go | 47 + .../apimachinery/pkg/util/mergepatch/util.go | 6 +- .../apimachinery/pkg/util/net/interface.go | 2 +- .../apimachinery/pkg/util/net/port_split.go | 13 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 7 + .../pkg/util/remotecommand/constants.go | 53 + .../apimachinery/pkg/util/runtime/runtime.go | 2 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 150 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 7 +- .../apimachinery/pkg/util/sets/empty.go | 4 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 150 +- .../apimachinery/pkg/util/sets/int32.go 
| 150 +- .../apimachinery/pkg/util/sets/int64.go | 150 +- .../apimachinery/pkg/util/sets/ordered.go | 53 + .../k8s.io/apimachinery/pkg/util/sets/set.go | 241 + .../apimachinery/pkg/util/sets/string.go | 150 +- .../pkg/util/strategicpatch/OWNERS | 1 + .../pkg/util/strategicpatch/meta.go | 4 +- .../pkg/util/strategicpatch/patch.go | 65 +- .../pkg/util/validation/field/errors.go | 6 +- .../pkg/util/validation/validation.go | 10 +- .../apimachinery/pkg/util/wait/backoff.go | 502 + .../apimachinery/pkg/util/wait/delay.go | 51 + .../apimachinery/pkg/util/wait/error.go | 96 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 97 + .../k8s.io/apimachinery/pkg/util/wait/poll.go | 315 + .../apimachinery/pkg/util/wait/timer.go | 121 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 642 +- .../k8s.io/apimachinery/pkg/watch/filter.go | 1 - vendor/k8s.io/apimachinery/pkg/watch/mux.go | 53 +- .../third_party/forked/golang/json/fields.go | 4 +- .../third_party/forked/golang/netutil/addr.go | 28 + .../forked/golang/reflect/deep_equal.go | 84 +- .../v1/matchcondition.go | 48 + .../v1/mutatingwebhook.go | 14 + .../v1/validatingwebhook.go | 14 + .../v1alpha1/auditannotation.go | 48 + .../v1alpha1/expressionwarning.go | 48 + .../v1alpha1/matchcondition.go | 48 + .../v1alpha1/matchresources.go | 90 + .../v1alpha1/namedrulewithoperations.go | 95 + .../v1alpha1/paramkind.go | 48 + .../v1alpha1/paramref.go | 71 + .../v1alpha1/typechecking.go | 44 + .../v1alpha1/validatingadmissionpolicy.go | 256 + .../validatingadmissionpolicybinding.go | 247 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1alpha1/validatingadmissionpolicyspec.go | 117 + .../validatingadmissionpolicystatus.go | 66 + .../v1alpha1/validation.go | 70 + .../v1alpha1/variable.go | 48 + .../v1beta1/auditannotation.go | 48 + .../v1beta1/expressionwarning.go | 48 + .../v1beta1/matchcondition.go | 48 + .../v1beta1/matchresources.go | 90 + .../v1beta1/mutatingwebhook.go | 29 +- ...erations.go => namedrulewithoperations.go} | 37 +- .../v1beta1/paramkind.go | 48 + .../admissionregistration/v1beta1/paramref.go | 71 + .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/typechecking.go | 44 + .../v1beta1/validatingadmissionpolicy.go | 256 + .../validatingadmissionpolicybinding.go | 247 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1beta1/validatingadmissionpolicyspec.go | 117 + .../validatingadmissionpolicystatus.go | 66 + .../v1beta1/validatingwebhook.go | 29 +- .../v1beta1/validation.go | 70 + .../admissionregistration/v1beta1/variable.go | 48 + .../v1alpha1/serverstorageversion.go | 11 + .../apps/v1/statefulsetordinals.go | 39 + .../apps/v1/statefulsetspec.go | 9 + .../v1beta1/statefulsetordinals.go} | 20 +- .../apps/v1beta1/statefulsetspec.go | 9 + .../apps/v1beta2/statefulsetordinals.go | 39 + .../apps/v1beta2/statefulsetspec.go | 9 + .../applyconfigurations/batch/v1/jobspec.go | 42 +- .../applyconfigurations/batch/v1/jobstatus.go | 18 + .../batch/v1/podfailurepolicy.go | 44 + .../podfailurepolicyonexitcodesrequirement.go | 63 + .../podfailurepolicyonpodconditionspattern.go | 52 + .../batch/v1/podfailurepolicyrule.go | 66 + .../v1alpha1/clustertrustbundle.go | 247 + .../v1alpha1/clustertrustbundlespec.go | 48 + .../core/v1/claimsource.go | 48 + .../applyconfigurations/core/v1/container.go | 67 +- .../core/v1/containerresizepolicy.go | 52 + .../core/v1/containerstatus.go | 40 +- .../core/v1/csipersistentvolumesource.go | 9 + .../core/v1/ephemeralcontainer.go | 21 + .../core/v1/ephemeralcontainercommon.go | 67 +- 
.../applyconfigurations/core/v1/hostip.go | 39 + .../core/v1/persistentvolumeclaimspec.go | 4 +- .../core/v1/persistentvolumeclaimstatus.go | 28 +- .../core/v1/persistentvolumestatus.go | 16 +- .../v1/podresourceclaim.go} | 30 +- .../core/v1/podresourceclaimstatus.go | 48 + .../core/v1/podschedulinggate.go | 39 + .../applyconfigurations/core/v1/podspec.go | 37 + .../applyconfigurations/core/v1/podstatus.go | 63 +- .../v1/resourceclaim.go} | 14 +- .../core/v1/resourcerequirements.go | 18 +- .../core/v1/servicespec.go | 12 +- .../core/v1/topologyspreadconstraint.go | 39 +- .../core/v1/typedobjectreference.go | 66 + .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../extensions/v1beta1/ingressportstatus.go | 61 + .../extensions/v1beta1/ingressstatus.go | 8 +- .../extensions/v1beta1/networkpolicy.go | 11 +- .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../exemptprioritylevelconfiguration.go | 48 + .../limitedprioritylevelconfiguration.go | 18 + .../prioritylevelconfigurationspec.go | 9 + .../exemptprioritylevelconfiguration.go | 48 + .../limitedprioritylevelconfiguration.go | 18 + .../v1beta1/prioritylevelconfigurationspec.go | 9 + .../exemptprioritylevelconfiguration.go | 48 + .../limitedprioritylevelconfiguration.go | 18 + .../v1beta2/prioritylevelconfigurationspec.go | 9 + .../exemptprioritylevelconfiguration.go | 48 + .../v1beta3/flowdistinguishermethod.go | 43 + .../flowcontrol/v1beta3/flowschema.go | 256 + .../v1beta3/flowschemacondition.go | 80 + .../flowcontrol/v1beta3/flowschemaspec.go | 71 + .../v1beta3/flowschemastatus.go} | 20 +- .../flowcontrol/v1beta3/groupsubject.go | 39 + .../limitedprioritylevelconfiguration.go | 66 + .../flowcontrol/v1beta3/limitresponse.go | 52 + .../v1beta3/nonresourcepolicyrule.go | 52 + .../v1beta3/policyruleswithsubjects.go | 72 + .../v1beta3/prioritylevelconfiguration.go | 256 + .../prioritylevelconfigurationcondition.go | 80 + .../prioritylevelconfigurationreference.go | 39 + .../v1beta3/prioritylevelconfigurationspec.go | 61 + .../prioritylevelconfigurationstatus.go} | 20 +- .../v1beta3/queuingconfiguration.go | 57 + .../flowcontrol/v1beta3/resourcepolicyrule.go | 83 + .../v1beta3/serviceaccountsubject.go | 48 + .../flowcontrol/v1beta3/subject.go | 70 + .../flowcontrol/v1beta3/usersubject.go | 39 + .../applyconfigurations/internal/internal.go | 2448 +++- .../v1/ingressloadbalanceringress.go | 62 + .../v1/ingressloadbalancerstatus.go | 44 + .../networking/v1/ingressportstatus.go | 61 + .../networking/v1/ingressstatus.go | 8 +- .../networking/v1/networkpolicy.go | 11 +- .../networking/v1alpha1/clustercidr.go | 247 + .../networking/v1alpha1/clustercidrspec.go | 70 + .../v1alpha1/ipaddress.go} | 86 +- .../networking/v1alpha1/ipaddressspec.go | 39 + .../networking/v1alpha1/parentreference.go | 79 + .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../networking/v1beta1/ingressportstatus.go | 61 + .../networking/v1beta1/ingressstatus.go | 8 +- .../policy/v1/poddisruptionbudgetspec.go | 16 +- .../policy/v1beta1/poddisruptionbudgetspec.go | 16 +- 
.../resource/v1alpha2/allocationresult.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 258 + .../v1alpha2/podschedulingcontextspec.go | 50 + .../v1alpha2/podschedulingcontextstatus.go | 44 + .../resource/v1alpha2/resourceclaim.go | 258 + .../resourceclaimconsumerreference.go | 70 + .../resourceclaimparametersreference.go | 57 + .../v1alpha2/resourceclaimschedulingstatus.go | 50 + .../resource/v1alpha2/resourceclaimspec.go | 61 + .../resource/v1alpha2/resourceclaimstatus.go | 71 + .../v1alpha2/resourceclaimtemplate.go | 249 + .../v1alpha2/resourceclaimtemplatespec.go | 188 + .../resource/v1alpha2/resourceclass.go | 266 + .../resourceclassparametersreference.go | 66 + .../resource/v1alpha2/resourcehandle.go | 48 + .../storage/v1/csidriverspec.go | 9 + .../storage/v1beta1/csidriverspec.go | 9 + .../discovery/aggregated_discovery.go | 156 + .../client-go/discovery/discovery_client.go | 306 +- vendor/k8s.io/client-go/dynamic/interface.go | 2 + vendor/k8s.io/client-go/dynamic/simple.go | 113 +- .../admissionregistration/interface.go | 8 + .../v1alpha1/interface.go | 52 + .../v1alpha1/validatingadmissionpolicy.go | 89 + .../validatingadmissionpolicybinding.go | 89 + .../v1beta1/interface.go | 14 + .../v1beta1/validatingadmissionpolicy.go | 89 + .../validatingadmissionpolicybinding.go | 89 + .../informers/certificates/interface.go | 8 + .../v1alpha1/clustertrustbundle.go | 89 + .../certificates/v1alpha1/interface.go | 45 + vendor/k8s.io/client-go/informers/doc.go | 18 + .../informers/extensions/v1beta1/interface.go | 7 - vendor/k8s.io/client-go/informers/factory.go | 87 +- .../informers/flowcontrol/interface.go | 8 + .../v1beta3/flowschema.go} | 44 +- .../flowcontrol/v1beta3/interface.go | 52 + .../v1beta3/prioritylevelconfiguration.go | 89 + vendor/k8s.io/client-go/informers/generic.go | 49 +- .../informers/networking/interface.go | 8 + .../networking/v1alpha1/clustercidr.go | 89 + .../networking/v1alpha1/interface.go | 52 + .../networking/v1alpha1/ipaddress.go | 89 + .../client-go/informers/resource/interface.go | 46 + .../informers/resource/v1alpha2/interface.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 90 + .../resource/v1alpha2/resourceclaim.go | 90 + .../v1alpha2/resourceclaimtemplate.go | 90 + .../resource/v1alpha2/resourceclass.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 171 +- vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../client-go/kubernetes/scheme/register.go | 26 +- .../v1alpha1/admissionregistration_client.go | 112 + .../admissionregistration/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/validatingadmissionpolicy.go | 243 + .../validatingadmissionpolicybinding.go | 197 + .../v1beta1/admissionregistration_client.go | 10 + .../v1beta1/generated_expansion.go | 4 + .../v1beta1/validatingadmissionpolicy.go | 243 + .../validatingadmissionpolicybinding.go | 197 + .../v1/authentication_client.go | 5 + .../authentication/v1/generated_expansion.go | 2 + .../authentication/v1/selfsubjectreview.go | 64 + .../v1alpha1/authentication_client.go | 107 + .../typed/authentication/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 21 + .../v1alpha1/selfsubjectreview.go | 64 + .../v1beta1/authentication_client.go | 5 + .../v1beta1/generated_expansion.go | 2 + .../v1beta1/selfsubjectreview.go | 64 + .../v1alpha1/certificates_client.go | 107 + .../v1alpha1/clustertrustbundle.go | 197 + .../typed/certificates/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 21 + .../typed/events/v1beta1/event_expansion.go | 2 
+- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../typed/extensions/v1beta1/networkpolicy.go | 48 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../typed/flowcontrol/v1beta3/doc.go | 20 + .../flowcontrol/v1beta3/flowcontrol_client.go | 112 + .../typed/flowcontrol/v1beta3/flowschema.go | 243 + .../v1beta3/generated_expansion.go | 23 + .../v1beta3/prioritylevelconfiguration.go | 243 + .../typed/networking/v1/networkpolicy.go | 48 - .../typed/networking/v1alpha1/clustercidr.go | 197 + .../typed/networking/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 23 + .../typed/networking/v1alpha1/ipaddress.go | 197 + .../networking/v1alpha1/networking_client.go | 112 + .../kubernetes/typed/resource/v1alpha2/doc.go | 20 + .../resource/v1alpha2/generated_expansion.go | 27 + .../resource/v1alpha2/podschedulingcontext.go | 256 + .../resource/v1alpha2/resource_client.go | 122 + .../typed/resource/v1alpha2/resourceclaim.go | 256 + .../v1alpha2/resourceclaimtemplate.go | 208 + .../typed/resource/v1alpha2/resourceclass.go | 197 + .../v1alpha1/expansion_generated.go | 27 + .../v1alpha1/validatingadmissionpolicy.go | 68 + .../validatingadmissionpolicybinding.go | 68 + .../v1beta1/expansion_generated.go | 8 + .../v1beta1/validatingadmissionpolicy.go | 68 + .../validatingadmissionpolicybinding.go | 68 + .../v1alpha1/clustertrustbundle.go | 68 + .../v1alpha1/expansion_generated.go | 23 + .../extensions/v1beta1/expansion_generated.go | 4 - .../extensions/v1beta1/podsecuritypolicy.go | 68 - .../v1beta3/expansion_generated.go | 27 + .../listers/flowcontrol/v1beta3/flowschema.go | 68 + .../v1beta3/prioritylevelconfiguration.go | 68 + .../networking/v1alpha1/clustercidr.go | 68 + .../v1alpha1/expansion_generated.go | 27 + .../listers/networking/v1alpha1/ipaddress.go | 68 + .../resource/v1alpha2/expansion_generated.go | 47 + .../resource/v1alpha2/podschedulingcontext.go | 99 + .../resource/v1alpha2/resourceclaim.go | 99 + .../v1alpha2/resourceclaimtemplate.go | 99 + .../resource/v1alpha2/resourceclass.go | 68 + vendor/k8s.io/client-go/openapi/OWNERS | 4 + vendor/k8s.io/client-go/openapi/client.go | 7 +- .../k8s.io/client-go/openapi/groupversion.go | 47 +- .../k8s.io/client-go/openapi/typeconverter.go | 48 + .../pkg/apis/clientauthentication/types.go | 5 + .../pkg/apis/clientauthentication/v1/types.go | 5 + .../v1/zz_generated.conversion.go | 2 + .../clientauthentication/v1beta1/types.go | 5 + .../v1beta1/zz_generated.conversion.go | 2 + vendor/k8s.io/client-go/pkg/version/base.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 39 +- vendor/k8s.io/client-go/rest/client.go | 13 +- vendor/k8s.io/client-go/rest/config.go | 20 +- vendor/k8s.io/client-go/rest/exec.go | 9 +- vendor/k8s.io/client-go/rest/plugin.go | 7 +- vendor/k8s.io/client-go/rest/request.go | 235 +- vendor/k8s.io/client-go/rest/transport.go | 5 +- vendor/k8s.io/client-go/rest/url_utils.go | 4 +- vendor/k8s.io/client-go/rest/warnings.go | 6 +- vendor/k8s.io/client-go/rest/with_retry.go | 61 +- .../k8s.io/client-go/restmapper/shortcut.go | 13 +- .../k8s.io/client-go/tools/auth/clientauth.go | 33 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 4 +- .../client-go/tools/cache/controller.go | 165 +- .../client-go/tools/cache/delta_fifo.go | 180 +- .../client-go/tools/cache/expiration_cache.go | 17 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 23 +- vendor/k8s.io/client-go/tools/cache/index.go | 10 +- .../k8s.io/client-go/tools/cache/listers.go | 20 +- 
.../client-go/tools/cache/object-names.go | 65 + .../k8s.io/client-go/tools/cache/reflector.go | 738 +- .../tools/cache/retry_with_deadline.go | 78 + .../client-go/tools/cache/shared_informer.go | 325 +- vendor/k8s.io/client-go/tools/cache/store.go | 44 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 120 + .../tools/cache/thread_safe_store.go | 307 +- .../client-go/tools/clientcmd/api/helpers.go | 91 +- .../client-go/tools/clientcmd/api/types.go | 19 +- .../client-go/tools/clientcmd/api/v1/types.go | 5 + .../api/v1/zz_generated.conversion.go | 2 + .../client-go/tools/clientcmd/auth_loaders.go | 5 +- .../tools/clientcmd/client_config.go | 7 +- .../client-go/tools/clientcmd/loader.go | 39 +- .../client-go/tools/clientcmd/overrides.go | 42 +- .../client-go/tools/clientcmd/validation.go | 15 +- .../k8s.io/client-go/tools/metrics/metrics.go | 65 + vendor/k8s.io/client-go/tools/pager/pager.go | 41 +- .../k8s.io/client-go/tools/reference/ref.go | 2 +- .../client-go/tools/remotecommand/doc.go | 20 + .../tools/remotecommand/errorstream.go | 54 + .../client-go/tools/remotecommand/reader.go | 41 + .../tools/remotecommand/remotecommand.go | 182 + .../client-go/tools/remotecommand/resize.go | 33 + .../client-go/tools/remotecommand/v1.go | 159 + .../client-go/tools/remotecommand/v2.go | 199 + .../client-go/tools/remotecommand/v3.go | 111 + .../client-go/tools/remotecommand/v4.go | 119 + vendor/k8s.io/client-go/transport/cache.go | 34 +- .../k8s.io/client-go/transport/cache_go118.go | 24 + vendor/k8s.io/client-go/transport/config.go | 21 +- .../client-go/transport/round_trippers.go | 10 +- .../k8s.io/client-go/transport/spdy/spdy.go | 103 + .../client-go/transport/token_source.go | 4 +- .../k8s.io/client-go/transport/transport.go | 27 +- vendor/k8s.io/client-go/util/cert/cert.go | 44 +- vendor/k8s.io/client-go/util/cert/io.go | 7 +- vendor/k8s.io/client-go/util/exec/exec.go | 52 + vendor/k8s.io/client-go/util/keyutil/key.go | 9 +- .../util/workqueue/delaying_queue.go | 61 +- vendor/k8s.io/client-go/util/workqueue/doc.go | 14 +- .../client-go/util/workqueue/metrics.go | 9 +- .../k8s.io/client-go/util/workqueue/queue.go | 53 +- .../util/workqueue/rate_limiting_queue.go | 60 +- vendor/k8s.io/klog/v2/OWNERS | 1 + vendor/k8s.io/klog/v2/README.md | 1 - vendor/k8s.io/klog/v2/contextual.go | 69 +- vendor/k8s.io/klog/v2/format.go | 65 + .../k8s.io/klog/v2/internal/buffer/buffer.go | 75 +- vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 + .../klog/v2/internal/serialize/keyvalues.go | 310 +- vendor/k8s.io/klog/v2/k8s_references.go | 120 +- vendor/k8s.io/klog/v2/klog.go | 529 +- vendor/k8s.io/klog/v2/klogr.go | 18 +- .../k8s.io/kube-openapi/pkg/cached/cache.go | 312 + .../k8s.io/kube-openapi/pkg/common/common.go | 50 +- .../kube-openapi/pkg/handler3/handler.go | 247 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 24 + .../pkg/internal/handler/handler_cache.go | 57 - .../pkg/internal/serialization.go | 65 + .../go-json-experiment/json}/AUTHORS | 2 +- .../go-json-experiment/json}/CONTRIBUTORS | 2 +- .../go-json-experiment/json}/LICENSE | 2 +- .../go-json-experiment/json/README.md | 321 + .../go-json-experiment/json/arshal.go | 513 + .../go-json-experiment/json/arshal_any.go | 238 + .../go-json-experiment/json/arshal_default.go | 1485 +++ .../go-json-experiment/json/arshal_funcs.go | 387 + .../go-json-experiment/json/arshal_inlined.go | 213 + .../go-json-experiment/json/arshal_methods.go | 229 + .../go-json-experiment/json/arshal_time.go | 241 + 
.../go-json-experiment/json/decode.go | 1655 +++ .../go-json-experiment/json/doc.go | 182 + .../go-json-experiment/json/encode.go | 1170 ++ .../go-json-experiment/json/errors.go | 183 + .../go-json-experiment/json/fields.go | 509 + .../go-json-experiment/json/fold.go | 56 + .../go-json-experiment/json/intern.go | 86 + .../go-json-experiment/json/pools.go | 182 + .../go-json-experiment/json/state.go | 747 ++ .../go-json-experiment/json/token.go | 522 + .../go-json-experiment/json/value.go | 381 + .../kube-openapi/pkg/schemaconv/openapi.go | 260 + .../pkg/schemaconv/proto_models.go | 178 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 292 +- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 22 +- .../k8s.io/kube-openapi/pkg/spec3/example.go | 25 +- .../pkg/spec3/external_documentation.go | 21 +- vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 254 + .../k8s.io/kube-openapi/pkg/spec3/header.go | 21 + .../kube-openapi/pkg/spec3/media_type.go | 22 +- .../kube-openapi/pkg/spec3/operation.go | 22 +- .../kube-openapi/pkg/spec3/parameter.go | 22 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 82 +- .../kube-openapi/pkg/spec3/request_body.go | 23 +- .../k8s.io/kube-openapi/pkg/spec3/response.go | 121 +- .../pkg/spec3/security_requirement.go | 56 - .../kube-openapi/pkg/spec3/security_scheme.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 41 +- vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 13 + .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/util/proto/document_v3.go | 6 +- .../kube-openapi/pkg/validation/spec/fuzz.go | 502 + .../pkg/validation/spec/gnostic.go | 1517 +++ .../pkg/validation/spec/header.go | 43 + .../kube-openapi/pkg/validation/spec/info.go | 45 + .../kube-openapi/pkg/validation/spec/items.go | 71 + .../pkg/validation/spec/operation.go | 50 + .../pkg/validation/spec/parameter.go | 93 +- .../pkg/validation/spec/path_item.go | 39 + .../kube-openapi/pkg/validation/spec/paths.go | 79 + .../kube-openapi/pkg/validation/spec/ref.go | 18 +- .../pkg/validation/spec/response.go | 55 +- .../pkg/validation/spec/responses.go | 106 +- .../pkg/validation/spec/schema.go | 118 + .../pkg/validation/spec/security_scheme.go | 28 + .../pkg/validation/spec/swagger.go | 171 +- .../kube-openapi/pkg/validation/spec/tag.go | 32 + vendor/k8s.io/utils/net/ipfamily.go | 181 + vendor/k8s.io/utils/net/net.go | 126 +- vendor/k8s.io/utils/net/port.go | 18 +- vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/k8s.io/utils/pointer/README.md | 3 + vendor/k8s.io/utils/pointer/pointer.go | 410 + vendor/k8s.io/utils/trace/trace.go | 49 +- vendor/modules.txt | 184 +- .../pkg/client/apiutil/apimachinery.go | 108 +- .../pkg/client/apiutil/dynamicrestmapper.go | 285 - .../pkg/client/apiutil/restmapper.go | 293 + .../controller-runtime/pkg/client/client.go | 393 +- ...ient_cache.go => client_rest_resources.go} | 27 +- .../controller-runtime/pkg/client/doc.go | 14 +- .../controller-runtime/pkg/client/dryrun.go | 50 +- .../pkg/client/interfaces.go | 91 +- .../pkg/client/metadata_client.go | 27 +- .../pkg/client/namespaced_client.go | 115 +- .../controller-runtime/pkg/client/options.go | 156 +- .../controller-runtime/pkg/client/patch.go | 2 +- .../controller-runtime/pkg/client/split.go | 141 - .../pkg/client/typed_client.go | 130 +- .../pkg/client/unstructured_client.go | 194 +- .../controller-runtime/pkg/client/watch.go | 36 +- .../controllerutil/controllerutil.go | 394 + .../pkg/controller/controllerutil/doc.go | 20 + .../pkg/internal/objectutil/objectutil.go | 78 - .../controller-runtime/pkg/log/deleg.go | 57 +- 
.../controller-runtime/pkg/log/log.go | 52 +- .../pkg/log/warning_handler.go | 2 +- .../e2e-framework/klient/client.go | 14 + .../e2e-framework/klient/conf/config.go | 31 +- .../e2e-framework/klient/decoder/decoder.go | 355 + .../klient/k8s/resources/resources.go | 175 +- .../e2e-framework/klient/k8s/watcher/watch.go | 132 + .../klient/wait/conditions/conditions.go | 90 +- .../e2e-framework/klient/wait/wait.go | 30 +- .../e2e-framework/pkg/env/action.go | 36 +- .../sigs.k8s.io/e2e-framework/pkg/env/env.go | 311 +- .../e2e-framework/pkg/envconf/config.go | 93 +- .../e2e-framework/pkg/envfuncs/kind_funcs.go | 186 +- .../pkg/envfuncs/provider_funcs.go | 189 + .../pkg/envfuncs/resource_funcs.go | 51 + .../e2e-framework/pkg/features/builder.go | 38 +- .../e2e-framework/pkg/features/feature.go | 37 +- .../e2e-framework/pkg/features/table.go | 19 +- .../e2e-framework/pkg/flags/flags.go | 174 +- .../e2e-framework/pkg/internal/types/types.go | 23 +- .../e2e-framework/support/kind/kind.go | 249 +- .../e2e-framework/support/types.go | 105 + .../e2e-framework/support/utils/command.go | 84 + .../internal/golang/encoding/json/decode.go | 55 +- .../internal/golang/encoding/json/encode.go | 71 +- .../internal/golang/encoding/json/fold.go | 5 +- .../internal/golang/encoding/json/fuzz.go | 9 +- .../golang/encoding/json/kubernetes_patch.go | 49 +- .../internal/golang/encoding/json/scanner.go | 4 +- .../internal/golang/encoding/json/stream.go | 9 +- .../internal/golang/encoding/json/tags.go | 16 +- vendor/sigs.k8s.io/json/json.go | 39 +- .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 356 + .../v4/schema/elements.go | 131 +- .../structured-merge-diff/v4/schema/equals.go | 3 + .../v4/schema/schemaschema.go | 3 + .../structured-merge-diff/v4/typed/helpers.go | 6 +- .../structured-merge-diff/v4/typed/merge.go | 7 +- .../v4/typed/reconcile_schema.go | 2 +- .../structured-merge-diff/v4/typed/typed.go | 13 +- 1142 files changed, 130326 insertions(+), 47293 deletions(-) delete mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/purell/README.md delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go delete mode 100644 vendor/github.com/emicklei/go-restful/Makefile rename vendor/github.com/emicklei/go-restful/{ => v3}/.gitignore (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/.goconvey rename vendor/github.com/emicklei/go-restful/{ => v3}/.travis.yml (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/CHANGES.md (69%) rename vendor/github.com/emicklei/go-restful/{ => v3}/LICENSE (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/Makefile rename vendor/github.com/emicklei/go-restful/{ => v3}/README.md (80%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/SECURITY.md rename vendor/github.com/emicklei/go-restful/{ => v3}/Srcfile (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/bench_test.sh (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compress.go (92%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_cache.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_pools.go (100%) 
rename vendor/github.com/emicklei/go-restful/{ => v3}/compressors.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/constants.go (89%) rename vendor/github.com/emicklei/go-restful/{ => v3}/container.go (97%) rename vendor/github.com/emicklei/go-restful/{ => v3}/cors_filter.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/coverage.sh (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/curly.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/curly_route.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/custom_verb.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/doc.go (95%) rename vendor/github.com/emicklei/go-restful/{ => v3}/entity_accessors.go (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/extensions.go rename vendor/github.com/emicklei/go-restful/{ => v3}/filter.go (79%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/filter_adapter.go rename vendor/github.com/emicklei/go-restful/{ => v3}/json.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/jsoniter.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/jsr311.go (96%) rename vendor/github.com/emicklei/go-restful/{ => v3}/log/log.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/logger.go (95%) rename vendor/github.com/emicklei/go-restful/{ => v3}/mime.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/options_filter.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/parameter.go (60%) rename vendor/github.com/emicklei/go-restful/{ => v3}/path_expression.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/path_processor.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/request.go (85%) rename vendor/github.com/emicklei/go-restful/{ => v3}/response.go (99%) rename vendor/github.com/emicklei/go-restful/{ => v3}/route.go (96%) rename vendor/github.com/emicklei/go-restful/{ => v3}/route_builder.go (88%) rename vendor/github.com/emicklei/go-restful/{ => v3}/route_reader.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/router.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/service_error.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service.go (96%) rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service_container.go (100%) delete mode 100644 vendor/github.com/evanphx/json-patch/.gitignore delete mode 100644 vendor/github.com/evanphx/json-patch/README.md rename vendor/github.com/evanphx/json-patch/{ => v5}/LICENSE (100%) rename vendor/github.com/evanphx/json-patch/{ => v5}/errors.go (100%) rename vendor/github.com/evanphx/json-patch/{ => v5}/merge.go (87%) rename vendor/github.com/evanphx/json-patch/{ => v5}/patch.go (56%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename 
vendor/github.com/fsnotify/fsnotify/{open_mode_bsd.go => system_bsd.go} (57%) rename vendor/github.com/fsnotify/fsnotify/{open_mode_darwin.go => system_darwin.go} (52%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/github.com/go-kit/log/staticcheck.conf delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml delete mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes delete mode 100644 vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/file.go rename vendor/github.com/google/{gnostic => gnostic-models}/LICENSE (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/context.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/error.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/extensions.go (97%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/helpers.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/main.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/reader.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.proto (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extensions.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/base.go (90%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/display.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/models.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/operations.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/reader.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/schema.json (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/writer.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.proto (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/document.go (96%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/openapi-2.0.json (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.proto (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/document.go (96%) delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json 
delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json create mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md create mode 100644 vendor/github.com/moby/spdystream/CONTRIBUTING.md create mode 100644 vendor/github.com/moby/spdystream/LICENSE create mode 100644 vendor/github.com/moby/spdystream/MAINTAINERS create mode 100644 vendor/github.com/moby/spdystream/NOTICE create mode 100644 vendor/github.com/moby/spdystream/README.md create mode 100644 vendor/github.com/moby/spdystream/connection.go create mode 100644 vendor/github.com/moby/spdystream/handlers.go create mode 100644 vendor/github.com/moby/spdystream/priority.go create mode 100644 vendor/github.com/moby/spdystream/spdy/dictionary.go create mode 100644 vendor/github.com/moby/spdystream/spdy/read.go create mode 100644 vendor/github.com/moby/spdystream/spdy/types.go create mode 100644 vendor/github.com/moby/spdystream/spdy/write.go create mode 100644 vendor/github.com/moby/spdystream/stream.go create mode 100644 vendor/github.com/moby/spdystream/utils.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go rename vendor/github.com/prometheus/client_golang/prometheus/{go_collector_go117.go => go_collector_latest.go} (52%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/prometheus/common/version/info_default.go create mode 100644 vendor/github.com/prometheus/common/version/info_go118.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go delete mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar rename vendor/github.com/prometheus/procfs/{xfrm.go => net_xfrm.go} (96%) create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroups.go create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 vendor/github.com/prometheus/procfs/proc_netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp6.go create mode 100644 vendor/github.com/prometheus/procfs/proc_sys.go create mode 100644 vendor/github.com/prometheus/procfs/softirqs.go create mode 100644 vendor/github.com/prometheus/procfs/thread.go create mode 100644 vendor/github.com/vladimirvivien/gexe/http.go create mode 100644 vendor/github.com/vladimirvivien/gexe/http/http_reader.go create mode 100644 vendor/github.com/vladimirvivien/gexe/http/http_response.go create mode 100644 
vendor/github.com/vladimirvivien/gexe/http/http_writer.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/proxy/dial.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go delete mode 100644 vendor/golang.org/x/text/width/kind_string.go delete mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables15.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/width/transform.go delete mode 100644 vendor/golang.org/x/text/width/trieval.go delete mode 100644 vendor/golang.org/x/text/width/width.go create mode 100644 vendor/golang.org/x/time/rate/sometimes.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/doc.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/register.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/types.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go 
create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.proto create mode 100644 vendor/k8s.io/api/resource/v1alpha2/register.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/splice.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/dump/dump.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go 
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/set.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/delay.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/loop.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/poll.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/timer.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go rename vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/{rulewithoperations.go => namedrulewithoperations.go} (56%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedflexvolume.go => apps/v1beta1/statefulsetordinals.go} (50%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go rename vendor/k8s.io/client-go/applyconfigurations/{autoscaling/v2/podresourcemetricsource.go => core/v1/podresourceclaim.go} (51%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedcsidriver.go => core/v1/resourceclaim.go} (65%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go rename vendor/k8s.io/client-go/applyconfigurations/{networking/v1/networkpolicystatus.go => flowcontrol/v1beta3/flowschemastatus.go} (61%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/networkpolicystatus.go => flowcontrol/v1beta3/prioritylevelconfigurationstatus.go} (57%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/ipaddress.go} (66%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/discovery/aggregated_discovery.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/doc.go rename vendor/k8s.io/client-go/informers/{extensions/v1beta1/podsecuritypolicy.go => flowcontrol/v1beta3/flowschema.go} (51%) create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/informers/resource/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 
vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 vendor/k8s.io/client-go/openapi/typeconverter.go create mode 100644 vendor/k8s.io/client-go/tools/cache/object-names.go create mode 100644 vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go create mode 100644 vendor/k8s.io/client-go/transport/cache_go118.go create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go create mode 100644 vendor/k8s.io/klog/v2/format.go create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/cached/cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/flags.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/serialization.go rename vendor/{golang.org/x/time => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/AUTHORS (74%) rename vendor/{golang.org/x/time => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/CONTRIBUTORS (70%) rename vendor/{github.com/PuerkitoBio/urlesc => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/LICENSE (96%) create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go create mode 100644 
vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go create mode 100644 vendor/k8s.io/utils/net/ipfamily.go create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/k8s.io/utils/pointer/README.md create mode 100644 vendor/k8s.io/utils/pointer/pointer.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go rename vendor/sigs.k8s.io/controller-runtime/pkg/client/{client_cache.go => client_rest_resources.go} (82%) delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/klient/decoder/decoder.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/klient/k8s/watcher/watch.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/provider_funcs.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/resource_funcs.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/support/types.go create mode 100644 vendor/sigs.k8s.io/e2e-framework/support/utils/command.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go diff --git 
a/go.mod b/go.mod
index a561de19c..4025f53ab 100644
--- a/go.mod
+++ b/go.mod
@@ -19,61 +19,59 @@ require (
github.com/netobserv/netobserv-ebpf-agent v0.3.2-0.20230719114816-31526a912a03
github.com/netsampler/goflow2 v1.1.1-0.20220509155230-5300494e4785
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.12.1
- github.com/prometheus/common v0.32.1
+ github.com/prometheus/client_golang v1.15.1
+ github.com/prometheus/common v0.44.0
github.com/segmentio/kafka-go v0.4.38
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.10.1
- github.com/stretchr/testify v1.8.1
- github.com/vladimirvivien/gexe v0.1.1
+ github.com/stretchr/testify v1.8.2
+ github.com/vladimirvivien/gexe v0.2.0
github.com/vmware/go-ipfix v0.5.13
golang.org/x/net v0.14.0
google.golang.org/grpc v1.58.1
google.golang.org/protobuf v1.31.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.24.0
- k8s.io/apimachinery v0.24.0
- k8s.io/client-go v0.24.0
- sigs.k8s.io/e2e-framework v0.0.6
+ k8s.io/api v0.28.2
+ k8s.io/apimachinery v0.28.2
+ k8s.io/client-go v0.28.2
+ sigs.k8s.io/e2e-framework v0.3.0
)
require (
- github.com/PuerkitoBio/purell v1.1.1 // indirect
- github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/emicklei/go-restful v2.16.0+incompatible // indirect
- github.com/evanphx/json-patch v4.12.0+incompatible // indirect
- github.com/fsnotify/fsnotify v1.5.1 // indirect
- github.com/go-kit/log v0.2.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.2 // indirect
+ github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+ github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.5 // indirect
- github.com/go-openapi/swag v0.19.14 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/gnostic v0.5.7-v3refs // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
- github.com/google/gofuzz v1.1.0 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
- github.com/kr/pretty v0.3.0 // indirect
github.com/libp2p/go-reuseport v0.1.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
- github.com/mailru/easyjson v0.7.6 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -85,8 +83,8 @@ require (
github.com/pion/transport/v2 v2.0.0 // indirect
github.com/pion/udp v0.1.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/procfs v0.7.3 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
@@ -102,26 +100,25 @@ require (
golang.org/x/sys v0.11.0 // indirect
golang.org/x/term v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
- golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
+ golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
- gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/klog/v2 v2.60.1 // indirect
- k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
- k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
- sigs.k8s.io/controller-runtime v0.11.0 // indirect
- sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+ k8s.io/klog/v2 v2.100.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
+ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
+ sigs.k8s.io/controller-runtime v0.15.1 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
require (
- github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
github.com/google/go-jsonnet v0.19.1
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect
)
-replace github.com/prometheus/common v0.32.1 => github.com/netobserv/prometheus-common v0.31.2-0.20220720134304-43e74fd22881
+replace github.com/prometheus/common v0.44.0 => github.com/netobserv/prometheus-common v0.44.0-netobserv
diff --git a/go.sum b/go.sum
index 5aebfdb2d..250a58fb2 100644
--- a/go.sum
+++ b/go.sum
@@ -15,11 +15,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -48,11 +43,9 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= @@ -64,7 +57,6 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -79,10 +71,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= @@ -106,7 +96,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix 
v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -134,7 +124,6 @@ github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -145,8 +134,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -191,34 +178,29 @@ github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7j github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= -github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -234,8 +216,8 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 
h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -245,9 +227,9 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -265,15 +247,15 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -306,8 +288,8 @@ 
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= @@ -315,6 +297,7 @@ github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -355,7 +338,6 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -363,7 +345,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -379,8 +360,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -390,10 +369,9 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -402,7 +380,6 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -413,11 +390,11 @@ github.com/google/go-jsonnet v0.19.1 h1:MORxkrG0elylUqh36R4AcSPX0oZQa9hvI3lroN+k github.com/google/go-jsonnet v0.19.1/go.mod h1:5JVT33JVCoehdTj5Z2KJq1eIdt3Nb8PCmZ+W5D8U350= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= 
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -426,10 +403,7 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -444,7 +418,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -499,10 +472,9 @@ github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwI github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -571,8 +543,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -595,8 +566,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f h1:mjj2aCHv9orQy7Y0OGs03wZNtuQHfNgCKY44eOIloe0= github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f/go.mod h1:Ec37gLe3vH+cnOp7x3qfd+0sz0pnP3CyIXKmQJ2ZOXU= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= @@ -616,8 +587,9 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og= github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= @@ -644,6 +616,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= 
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -676,13 +649,11 @@ github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E= github.com/netobserv/netobserv-ebpf-agent v0.3.2-0.20230719114816-31526a912a03 h1:1JWwW3KPAF+GbepmVO7+w4/veFLdV7+bprrGAff+Y7A= github.com/netobserv/netobserv-ebpf-agent v0.3.2-0.20230719114816-31526a912a03/go.mod h1:qnu3gGPF0zBc2QEvt7Hc7j9pA02UQzc0sztr30FWh6o= -github.com/netobserv/prometheus-common v0.31.2-0.20220720134304-43e74fd22881 h1:hx5bi6xBovRjmwUoVJBzhJ3EDo4K4ZUsqqKrJuQ2vMI= -github.com/netobserv/prometheus-common v0.31.2-0.20220720134304-43e74fd22881/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/netobserv/prometheus-common v0.44.0-netobserv h1:1DEcYfG8UQcDRjHax+MBJGpwbnsQNB+fiiMh54mW4yU= +github.com/netobserv/prometheus-common v0.44.0-netobserv/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/netsampler/goflow2 v1.1.1-0.20220509155230-5300494e4785 h1:qhDrIMXlk8YV7BxwA6UR/dQVdUzohjLlmrUXymsBx6g= github.com/netsampler/goflow2 v1.1.1-0.20220509155230-5300494e4785/go.mod h1:yqw2cLe+lbnDN1+JKBqxoj2FKOA83iB8wV0aCKnlesg= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -692,16 +663,13 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= @@ -768,15 +736,16 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -797,8 +766,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 h1:V/4Cj2GytqdqK7OMEz6c4LNjey3SNyfw3pg5jPKtJvQ= github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= 
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -810,8 +779,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= @@ -868,7 +836,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -887,8 +854,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -901,8 +869,8 @@ github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vladimirvivien/gexe v0.1.1 h1:2A0SBaOSKH+cwLVdt6H+KkHZotZWRNLlWygANGw5DxE= -github.com/vladimirvivien/gexe v0.1.1/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= +github.com/vladimirvivien/gexe v0.2.0 h1:nbdAQ6vbZ+ZNsolCgSVb9Fno60kzSuvtzVh6Ytqi/xY= +github.com/vladimirvivien/gexe v0.2.0/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= github.com/vmware/go-ipfix v0.5.13 
h1:LtuJVf38XCghUN+WaI9OwQ1U7yim/pcmxz0PzdCqUnQ= github.com/vmware/go-ipfix v0.5.13/go.mod h1:YqAPuFn4UMdiJVUI5YGXtrSmqi+lNMx2jewYOUryuws= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -920,7 +888,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -937,8 +904,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -946,7 +911,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= @@ -954,7 +919,7 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -976,7 +941,6 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= @@ -1007,7 +971,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1016,9 +979,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1055,7 +1015,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1063,17 +1022,10 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= @@ -1086,13 +1038,7 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1135,14 +1081,12 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1163,7 +1107,6 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1174,28 +1117,17 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1216,7 +1148,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1229,8 +1160,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1288,7 +1219,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1299,19 +1229,14 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -1336,11 +1261,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod 
h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1384,16 +1304,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1415,11 +1325,6 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58= google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1434,7 +1339,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1464,7 +1368,6 @@ gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mN gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1480,7 +1383,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -1496,51 +1398,49 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg= -k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ= -k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go 
v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= -k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ= -sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= -sigs.k8s.io/e2e-framework v0.0.6 h1:mGalOzsc25nz4GBOD6oVWBFKFcAanMso6oj3+4wiCFw= -sigs.k8s.io/e2e-framework v0.0.6/go.mod h1:XSknNb1ovbtOyNNjV8DKuY9Nr4rta4wwtnZq3IRGMl0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/controller-runtime v0.15.1 
h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= +sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= +sigs.k8s.io/e2e-framework v0.3.0/go.mod h1:C+ef37/D90Dc7Xq1jQnNbJYscrUGpxrWog9bx2KIa+c= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/pkg/test/e2e/utils.go b/pkg/test/e2e/utils.go index 075f4b4dd..9a37cc813 100644 --- a/pkg/test/e2e/utils.go +++ b/pkg/test/e2e/utils.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" ) type namespaceContextKey string @@ -77,7 +78,7 @@ func e2eRecreateKindCluster(clusterName string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { fmt.Printf("====> Recreating KIND cluster - %s\n", clusterName) gexe.RunProc(fmt.Sprintf(`kind delete cluster --name %s`, clusterName)) - newCtx, err := envfuncs.CreateKindCluster(clusterName)(ctx, cfg) + newCtx, err := envfuncs.CreateCluster(kind.NewProvider(), clusterName)(ctx, cfg) fmt.Printf("====> Done.\n") return newCtx, err } @@ -86,7 +87,7 @@ func e2eRecreateKindCluster(clusterName string) env.Func { func e2eDeleteKindCluster(clusterName string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { fmt.Printf("====> Deleting KIND cluster - %s\n", clusterName) - newCtx, err := envfuncs.DestroyKindCluster(clusterName)(ctx, cfg) + newCtx, err := envfuncs.DestroyCluster(clusterName)(ctx, cfg) fmt.Printf("\n====> Done.\n") return newCtx, err } diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c807..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 
4b9986dea..000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c498..000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. 
- -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190a..000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f9..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ 
-1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a53..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b8462459..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. 
-func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index 3a824ac3d..000000000 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -all: test - -test: - go vet . - go test -cover -v . 
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/v3/.gitignore similarity index 100% rename from vendor/github.com/emicklei/go-restful/.gitignore rename to vendor/github.com/emicklei/go-restful/v3/.gitignore diff --git a/vendor/github.com/emicklei/go-restful/v3/.goconvey b/vendor/github.com/emicklei/go-restful/v3/.goconvey new file mode 100644 index 000000000..8485e986e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml similarity index 100% rename from vendor/github.com/emicklei/go-restful/.travis.yml rename to vendor/github.com/emicklei/go-restful/v3/.travis.yml diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md similarity index 69% rename from vendor/github.com/emicklei/go-restful/CHANGES.md rename to vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 3a5299474..352018e70 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,104 +1,178 @@ -# Change history of go-restful (v2 only) +# Change history of go-restful -## v2.16.0 - 2022-07-11 +## [v3.10.2] - 2023-03-09 -- Backported CORS filter. #489 (#493) #503 +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. -## v2.15.0 - 2020-11-10 +## [v3.10.1] - 2022-11-19 -- Add OPTIONS in Webservice +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 + +- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) + +## [v3.8.0] - 2022-06-06 + +- use exact matching of allowed domain entries, issue #489 (#493) + - this changes fixes [security] Authorization Bypass Through User-Controlled Key + by changing the behaviour of the AllowedDomains setting in the CORS filter. + To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc + callback mechanism which is called when a simple domain match fails. +- add test and fix for POST without body and Content-type, issue #492 (#496) +- [Minor] Bad practice to have a mix of Receiver types. (#491) + +## [v3.7.2] - 2021-11-24 + +- restored FilterChain (#482 by SVilgelm) + + +## [v3.7.1] - 2021-10-04 + +- fix problem with contentEncodingEnabled setting (#479) + +## [v3.7.0] - 2021-09-24 + +- feat(parameter): adds additional openapi mappings (#478) + +## [v3.6.0] - 2021-09-18 + +- add support for vendor extensions (#477 thx erraggy) + +## [v3.5.2] - 2021-07-14 + +- fix removing absent route from webservice (#472) + +## [v3.5.1] - 2021-04-12 + +- fix handling no match access selected path +- remove obsolete field + +## [v3.5.0] - 2021-04-10 + +- add check for wildcard (#463) in CORS +- add access to Route from Request, issue #459 (#462) + +## [v3.4.0] - 2020-11-10 + +- Added OPTIONS to WebService + +## [v3.3.2] - 2020-01-23 -## v2.14.3 - 2020-08-31 - Fixed duplicate compression in dispatch. 
#449 -## v2.14.2 - 2020-08-31 +## [v3.3.1] - 2020-08-31 - Added check on writer to prevent compression of response twice. #447 -## v2.14.0 - 2020-08-19 +## [v3.3.0] - 2020-08-19 - Enable content encoding on Handle and ServeHTTP (#446) - List available representations in 406 body (#437) - Convert to string using rune() (#443) -## v2.13.0 - 2020-06-21 +## [v3.2.0] - 2020-06-21 -- 405 Method Not Allowed must have Allow header (#436) +- 405 Method Not Allowed must have Allow header (#436) (thx Bracken ) - add field allowedMethodsWithoutContentType (#424) -## v2.12.0 +## [v3.1.0] - support describing response headers (#426) - fix openapi examples (#425) -- merge v3 fix (#422) -## v2.11.1 +v3.0.0 + +- fix: use request/response resulting from filter chain +- add Go module + Module consumer should use github.com/emicklei/go-restful/v3 as import path + +v2.10.0 + +- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>) +- fixed static example (thanks Arthur ) +- simplify code (thanks Christian Muehlhaeuser ) +- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben ) + +v2.9.6 + +- small optimization in filter code + +v2.11.1 - fix WriteError return value (#415) -## v2.11.0 +v2.11.0 - allow prefix and suffix in path variable expression (#414) -## v2.9.6 +v2.9.6 - support google custome verb (#413) -## v2.9.5 +v2.9.5 - fix panic in Response.WriteError if err == nil -## v2.9.4 +v2.9.4 - fix issue #400 , parsing mime type quality - Route Builder added option for contentEncodingEnabled (#398) -## v2.9.3 +v2.9.3 - Avoid return of 415 Unsupported Media Type when request body is empty (#396) -## v2.9.2 +v2.9.2 - Reduce allocations in per-request methods to improve performance (#395) -## v2.9.1 +v2.9.1 - Fix issue with default responses and invalid status code 0. (#393) -## v2.9.0 +v2.9.0 - add per Route content encoding setting (overrides container setting) -## v2.8.0 +v2.8.0 - add Request.QueryParameters() - add json-iterator (via build tag) - disable vgo module (until log is moved) -## v2.7.1 +v2.7.1 - add vgo module -## v2.6.1 +v2.6.1 - add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) -## v2.6.0 +v2.6.0 - Make JSR 311 routing and path param processing consistent - Adding description to RouteBuilder.Reads() - Update example for Swagger12 and OpenAPI -## 2017-09-13 +2017-09-13 - added route condition functions using `.If(func)` in route building. -## 2017-02-16 +2017-02-16 - solved issue #304, make operation names unique -## 2017-01-30 +2017-01-30 [IMPORTANT] For swagger users, change your import statement to: swagger "github.com/emicklei/go-restful-swagger12" @@ -106,60 +180,60 @@ - moved swagger 1.2 code to go-restful-swagger12 - created TAG 2.0.0 -## 2017-01-27 +2017-01-27 - remove defer request body close - expose Dispatch for testing filters and Routefunctions - swagger response model cannot be array - created TAG 1.0.0 -## 2016-12-22 +2016-12-22 - (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) -## 2016-11-26 +2016-11-26 - Default change! now use CurlyRouter (was RouterJSR311) - Default change! no more caching of request content - Default change! 
do not recover from panics -## 2016-09-22 +2016-09-22 - fix the DefaultRequestContentType feature -## 2016-02-14 +2016-02-14 - take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response - add constructors for custom entity accessors for xml and json -## 2015-09-27 +2015-09-27 - rename new WriteStatusAnd... to WriteHeaderAnd... for consistency -## 2015-09-25 +2015-09-25 - fixed problem with changing Header after WriteHeader (issue 235) -## 2015-09-14 +2015-09-14 - changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) - added support for custom EntityReaderWriters. -## 2015-08-06 +2015-08-06 - add support for reading entities from compressed request content - use sync.Pool for compressors of http response and request body - add Description to Parameter for documentation in Swagger UI -## 2015-03-20 +2015-03-20 - add configurable logging -## 2015-03-18 +2015-03-18 - if not specified, the Operation is derived from the Route function -## 2015-03-17 +2015-03-17 - expose Parameter creation functions - make trace logger an interface @@ -168,26 +242,26 @@ - JSR311 router now handles wildcards - add Notes to Route -## 2014-11-27 +2014-11-27 - (api add) PrettyPrint per response. (as proposed in #167) -## 2014-11-12 +2014-11-12 - (api add) ApiVersion(.) for documentation in Swagger UI -## 2014-11-10 +2014-11-10 - (api change) struct fields tagged with "description" show up in Swagger UI -## 2014-10-31 +2014-10-31 - (api change) ReturnsError -> Returns - (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder - fix swagger nested structs - sort Swagger response messages by code -## 2014-10-23 +2014-10-23 - (api add) ReturnsError allows you to document Http codes in swagger - fixed problem with greedy CurlyRouter @@ -201,73 +275,73 @@ - (api add) added AllowedDomains in CORS - (api add) ParameterNamed for detailed documentation -## 2014-04-16 +2014-04-16 - (api add) expose constructor of Request for testing. -## 2014-06-27 +2014-06-27 - (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). - (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). -## 2014-07-03 +2014-07-03 - (api add) CORS can be configured with a list of allowed domains -## 2014-03-12 +2014-03-12 - (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) -## 2014-02-26 +2014-02-26 - (api add) Request now provides information about the matched Route, see method SelectedRoutePath -## 2014-02-17 +2014-02-17 - (api change) renamed parameter constants (go-lint checks) -## 2014-01-10 +2014-01-10 - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier -## 2014-01-07 +2014-01-07 - (api change) Write* methods in Response now return the error or nil. - added example of serving HTML from a Go template. - fixed comparing Allowed headers in CORS (is now case-insensitive) -## 2013-11-13 +2013-11-13 - (api add) Response knows how many bytes are written to the response body. -## 2013-10-29 +2013-10-29 - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. 
-## 2013-10-04 +2013-10-04 - (api add) Response knows what HTTP status has been written - (api add) Request can have attributes (map of string->interface, also called request-scoped variables -## 2013-09-12 +2013-09-12 - (api change) Router interface simplified - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths -## 2013-08-05 +2013-08-05 - add OPTIONS support - add CORS support -## 2013-08-27 +2013-08-27 - fixed some reported issues (see github) - (api change) deprecated use of WriteError; use WriteErrorString instead -## 2014-04-15 +2014-04-15 - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString -## 2013-08-08 +2013-08-08 - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. - (api add) the swagger package has be extended to have a UI per container. @@ -280,38 +354,38 @@ Important API changes: - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. -## 2013-07-06 +2013-07-06 - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. -## 2013-06-19 +2013-06-19 - (improve) DoNotRecover option, moved request body closer, improved ReadEntity -## 2013-06-03 +2013-06-03 - (api change) removed Dispatcher interface, hide PathExpression - changed receiver names of type functions to be more idiomatic Go -## 2013-06-02 +2013-06-02 - (optimize) Cache the RegExp compilation of Paths. -## 2013-05-22 +2013-05-22 - (api add) Added support for request/response filter functions -## 2013-05-18 +2013-05-18 - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - (api change) Moved Swagger Webservice to swagger package (see example restful-user) -## [2012-11-14 .. 2013-05-18> +[2012-11-14 .. 2013-05-18> - See https://github.com/emicklei/go-restful/commits -## 2012-11-14 +2012-11-14 - Initial commit diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/v3/LICENSE similarity index 100% rename from vendor/github.com/emicklei/go-restful/LICENSE rename to vendor/github.com/emicklei/go-restful/v3/LICENSE diff --git a/vendor/github.com/emicklei/go-restful/v3/Makefile b/vendor/github.com/emicklei/go-restful/v3/Makefile new file mode 100644 index 000000000..16d0b80bb --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/Makefile @@ -0,0 +1,8 @@ +all: test + +test: + go vet . + go test -cover -v . 
+ +ex: + find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {} \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md similarity index 80% rename from vendor/github.com/emicklei/go-restful/README.md rename to vendor/github.com/emicklei/go-restful/v3/README.md index e5878a668..85da90128 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -7,7 +7,7 @@ package for building REST-style Web Services using Google Go [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) -- [Code examples using v3](https://github.com/emicklei/go-restful/tree/master/examples) +- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: @@ -21,23 +21,23 @@ REST asks developers to use HTTP methods explicitly and in a way that's consiste ### Usage -#### Using Go Modules +#### Without Go Modules -As of version `v3.0.0` (on the v3 branch), this package supports Go modules. +All versions up to `v2.*.*` (on the master) are not supporting Go modules. ``` import ( - restful "github.com/emicklei/go-restful/v3" + restful "github.com/emicklei/go-restful" ) ``` -#### Without Go Modules +#### Using Go Modules -All versions up to `v2.*.*` (on the master) are not supporting Go modules. +As of version `v3.0.0` (on the v3 branch), this package supports Go modules. ``` import ( - restful "github.com/emicklei/go-restful" + restful "github.com/emicklei/go-restful/v3" ) ``` @@ -61,16 +61,16 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo ... } ``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/user-resource/restful-user-resource.go) - + +[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) + ### Features - Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support - Configurable router: - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. 
/resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) +- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) - Response API for writing structs to JSON/XML and setting headers - Customizable encoding using EntityReaderWriter registration - Filters for intercepting the request → response flow on Service or Route level @@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration +- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function ## How to customize There are several hooks to customize the behavior of the go-restful package. @@ -94,12 +95,15 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. +- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. + - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources +- [Example programs](./examples) - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) @@ -108,4 +112,4 @@ TODO: write examples of these. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2020, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/v3/SECURITY.md b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md new file mode 100644 index 000000000..810d3b510 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| v3.7.x | :white_check_mark: | +| < v3.0.1 | :x: | + +## Reporting a Vulnerability + +Create an Issue and put the label `[security]` in the title of the issue. +Valid reported security issues are expected to be solved within a week. 
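The README hunk above reorders the module guidance so that the Go-modules import path (`github.com/emicklei/go-restful/v3`) is presented first, matching the vendored move from `go-restful` to `go-restful/v3` throughout this patch. For orientation only, here is a minimal sketch of a consumer using that v3 import path; the `/users` route, handler body, and port are illustrative assumptions, not part of this patch.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Declare a WebService that consumes and produces JSON.
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// GET /users/{user-id} echoes the path parameter back as JSON.
	ws.Route(ws.GET("/{user-id}").To(func(req *restful.Request, resp *restful.Response) {
		id := req.PathParameter("user-id")
		resp.WriteAsJson(map[string]string{"id": id})
	}))

	// Register on the default container and serve.
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```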
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/v3/Srcfile similarity index 100% rename from vendor/github.com/emicklei/go-restful/Srcfile rename to vendor/github.com/emicklei/go-restful/v3/Srcfile diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/v3/bench_test.sh similarity index 100% rename from vendor/github.com/emicklei/go-restful/bench_test.sh rename to vendor/github.com/emicklei/go-restful/v3/bench_test.sh diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go similarity index 92% rename from vendor/github.com/emicklei/go-restful/compress.go rename to vendor/github.com/emicklei/go-restful/v3/compress.go index 220b37712..1ff239f99 100644 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -83,7 +83,11 @@ func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error } // WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { +// It also inspects the httpWriter whether its content-encoding is already set (non-empty). +func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) { + if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" { + return false, "" + } header := httpRequest.Header.Get(HEADER_AcceptEncoding) gi := strings.Index(header, ENCODING_GZIP) zi := strings.Index(header, ENCODING_DEFLATE) diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_cache.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_cache.go diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_pools.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_pools.go diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/v3/compressors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressors.go rename to vendor/github.com/emicklei/go-restful/v3/compressors.go diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go similarity index 89% rename from vendor/github.com/emicklei/go-restful/constants.go rename to vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c5e..2328bde6c 100644 --- a/vendor/github.com/emicklei/go-restful/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = "Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" 
HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/v3/container.go similarity index 97% rename from vendor/github.com/emicklei/go-restful/container.go rename to vendor/github.com/emicklei/go-restful/v3/container.go index afca312a4..dd56246dd 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/v3/container.go @@ -14,7 +14,7 @@ import ( "strings" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. @@ -261,7 +261,7 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R contentEncodingEnabled = *route.contentEncodingEnabled } if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) @@ -287,7 +287,12 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R allFilters = append(allFilters, c.containerFilters...) allFilters = append(allFilters, webService.filters...) allFilters = append(allFilters, route.Filters...) - chain := FilterChain{Filters: allFilters, Target: route.Function} + chain := FilterChain{ + Filters: allFilters, + Target: route.Function, + ParameterDocs: route.ParameterDocs, + Operation: route.Operation, + } chain.ProcessFilter(wrappedRequest, wrappedResponse) } else { // no filters, handle request by route @@ -327,7 +332,7 @@ func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http. 
} }() - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) @@ -360,7 +365,7 @@ func (c *Container) Handle(pattern string, handler http.Handler) { }() if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/cors_filter.go rename to vendor/github.com/emicklei/go-restful/v3/cors_filter.go diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/v3/coverage.sh similarity index 100% rename from vendor/github.com/emicklei/go-restful/coverage.sh rename to vendor/github.com/emicklei/go-restful/v3/coverage.sh diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/curly.go rename to vendor/github.com/emicklei/go-restful/v3/curly.go diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/v3/curly_route.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/curly_route.go rename to vendor/github.com/emicklei/go-restful/v3/curly_route.go diff --git a/vendor/github.com/emicklei/go-restful/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/custom_verb.go rename to vendor/github.com/emicklei/go-restful/v3/custom_verb.go diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/doc.go rename to vendor/github.com/emicklei/go-restful/v3/doc.go index f7c16b01f..69b13057d 100644 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -28,7 +28,7 @@ This package has the logic to find the best matching Route and if found, call it The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. Regular expression matching Routes @@ -82,7 +82,7 @@ These are processed before calling the function associated with the Route. // install 2 chained route filters (processed before calling findUser) ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. Response Encoding @@ -93,7 +93,7 @@ Two encodings are supported: gzip and deflate. 
To enable this for all responses: If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go +See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go OPTIONS support diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/entity_accessors.go rename to vendor/github.com/emicklei/go-restful/v3/entity_accessors.go diff --git a/vendor/github.com/emicklei/go-restful/v3/extensions.go b/vendor/github.com/emicklei/go-restful/v3/extensions.go new file mode 100644 index 000000000..5023fa049 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/extensions.go @@ -0,0 +1,21 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// ExtensionProperties provides storage of vendor extensions for entities +type ExtensionProperties struct { + // Extensions vendor extensions used to describe extra functionality + // (https://swagger.io/docs/specification/2-0/swagger-extensions/) + Extensions map[string]interface{} +} + +// AddExtension adds or updates a key=value pair to the extension map. +func (ep *ExtensionProperties) AddExtension(key string, value interface{}) { + if ep.Extensions == nil { + ep.Extensions = map[string]interface{}{key: value} + } else { + ep.Extensions[key] = value + } +} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/v3/filter.go similarity index 79% rename from vendor/github.com/emicklei/go-restful/filter.go rename to vendor/github.com/emicklei/go-restful/v3/filter.go index c23bfb591..fd88c536c 100644 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ b/vendor/github.com/emicklei/go-restful/v3/filter.go @@ -6,9 +6,11 @@ package restful // FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters + ParameterDocs []*Parameter // the parameter docs for the route + Operation string // the name of the operation } // ProcessFilter passes the request,response pair through the next of Filters. diff --git a/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go new file mode 100644 index 000000000..c246512fc --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go @@ -0,0 +1,21 @@ +package restful + +import ( + "net/http" +) + +// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler +type HttpMiddlewareHandler func(http.Handler) http.Handler + +// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction. 
+func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction { + return func(req *Request, resp *Response, chain *FilterChain) { + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + req.Request = r + resp.ResponseWriter = rw + chain.ProcessFilter(req, resp) + }) + + middleware(next).ServeHTTP(resp.ResponseWriter, req.Request) + } +} diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/json.go rename to vendor/github.com/emicklei/go-restful/v3/json.go diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/jsoniter.go rename to vendor/github.com/emicklei/go-restful/v3/jsoniter.go diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/jsr311.go rename to vendor/github.com/emicklei/go-restful/v3/jsr311.go index 9cfd59a1c..07a0c91e9 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -151,6 +151,16 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } + // if POST,PUT,PATCH without body + method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") + if (method == http.MethodPost || + method == http.MethodPut || + method == http.MethodPatch) && length == "" { + return nil, NewError( + http.StatusUnsupportedMediaType, + fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) + } return nil, NewError( http.StatusNotAcceptable, fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/v3/log/log.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/log/log.go rename to vendor/github.com/emicklei/go-restful/v3/log/log.go diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/v3/logger.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/logger.go rename to vendor/github.com/emicklei/go-restful/v3/logger.go index 6595df002..29202726f 100644 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ b/vendor/github.com/emicklei/go-restful/v3/logger.go @@ -4,7 +4,7 @@ package restful // Use of this source code is governed by a license // that can be found in the LICENSE file. 
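The new `filter_adapter.go` above lets a plain `net/http` middleware run as a go-restful `FilterFunction`. A minimal sketch of wiring it up, assuming an invented header-stamping middleware and `/ping` route (only `HttpMiddlewareHandlerToFilter` comes from this patch; the rest is standard go-restful usage):

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// setHeader is an ordinary net/http middleware, invented for this example.
func setHeader(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Example", "true")
		next.ServeHTTP(w, r)
	})
}

func main() {
	ws := new(restful.WebService)
	// Reuse the net/http middleware as a go-restful filter.
	ws.Filter(restful.HttpMiddlewareHandlerToFilter(setHeader))
	ws.Route(ws.GET("/ping").To(func(req *restful.Request, resp *restful.Response) {
		_ = resp.WriteAsJson(map[string]string{"status": "ok"})
	}))

	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```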
import ( - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) var trace bool = false diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/v3/mime.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/mime.go rename to vendor/github.com/emicklei/go-restful/v3/mime.go diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/v3/options_filter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/options_filter.go rename to vendor/github.com/emicklei/go-restful/v3/options_filter.go diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/v3/parameter.go similarity index 60% rename from vendor/github.com/emicklei/go-restful/parameter.go rename to vendor/github.com/emicklei/go-restful/v3/parameter.go index febe2cc17..0b851bb43 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/v3/parameter.go @@ -1,5 +1,7 @@ package restful +import "sort" + // Copyright 2013 Ernest Micklei. All rights reserved. // Use of this source code is governed by a license // that can be found in the LICENSE file. @@ -55,13 +57,25 @@ type Parameter struct { // ParameterData represents the state of a Parameter. // It is made public to make it accessible to e.g. the Swagger package. type ParameterData struct { + ExtensionProperties Name, Description, DataType, DataFormat string Kind int Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string - CollectionFormat string + // AllowableValues is deprecated. Use PossibleValues instead + AllowableValues map[string]string + PossibleValues []string + AllowMultiple bool + AllowEmptyValue bool + DefaultValue string + CollectionFormat string + Pattern string + Minimum *float64 + Maximum *float64 + MinLength *int64 + MaxLength *int64 + MinItems *int64 + MaxItems *int64 + UniqueItems bool } // Data returns the state of the Parameter @@ -114,9 +128,38 @@ func (p *Parameter) AllowMultiple(multiple bool) *Parameter { return p } -// AllowableValues sets the allowableValues field and returns the receiver +// AddExtension adds or updates a key=value pair to the extension map +func (p *Parameter) AddExtension(key string, value interface{}) *Parameter { + p.data.AddExtension(key, value) + return p +} + +// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver +func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter { + p.data.AllowEmptyValue = multiple + return p +} + +// AllowableValues is deprecated. Use PossibleValues instead. Both will be set. 
func (p *Parameter) AllowableValues(values map[string]string) *Parameter { p.data.AllowableValues = values + + allowableSortedKeys := make([]string, 0, len(values)) + for k := range values { + allowableSortedKeys = append(allowableSortedKeys, k) + } + sort.Strings(allowableSortedKeys) + + p.data.PossibleValues = make([]string, 0, len(values)) + for _, k := range allowableSortedKeys { + p.data.PossibleValues = append(p.data.PossibleValues, values[k]) + } + return p +} + +// PossibleValues sets the possible values field and returns the receiver +func (p *Parameter) PossibleValues(values []string) *Parameter { + p.data.PossibleValues = values return p } @@ -149,3 +192,51 @@ func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { p.data.CollectionFormat = format.String() return p } + +// Pattern sets the pattern field and returns the receiver +func (p *Parameter) Pattern(pattern string) *Parameter { + p.data.Pattern = pattern + return p +} + +// Minimum sets the minimum field and returns the receiver +func (p *Parameter) Minimum(minimum float64) *Parameter { + p.data.Minimum = &minimum + return p +} + +// Maximum sets the maximum field and returns the receiver +func (p *Parameter) Maximum(maximum float64) *Parameter { + p.data.Maximum = &maximum + return p +} + +// MinLength sets the minLength field and returns the receiver +func (p *Parameter) MinLength(minLength int64) *Parameter { + p.data.MinLength = &minLength + return p +} + +// MaxLength sets the maxLength field and returns the receiver +func (p *Parameter) MaxLength(maxLength int64) *Parameter { + p.data.MaxLength = &maxLength + return p +} + +// MinItems sets the minItems field and returns the receiver +func (p *Parameter) MinItems(minItems int64) *Parameter { + p.data.MinItems = &minItems + return p +} + +// MaxItems sets the maxItems field and returns the receiver +func (p *Parameter) MaxItems(maxItems int64) *Parameter { + p.data.MaxItems = &maxItems + return p +} + +// UniqueItems sets the uniqueItems field and returns the receiver +func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter { + p.data.UniqueItems = uniqueItems + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/v3/path_expression.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/path_expression.go rename to vendor/github.com/emicklei/go-restful/v3/path_expression.go diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/v3/path_processor.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/path_processor.go rename to vendor/github.com/emicklei/go-restful/v3/path_processor.go diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go similarity index 85% rename from vendor/github.com/emicklei/go-restful/request.go rename to vendor/github.com/emicklei/go-restful/v3/request.go index 05d768117..0020095e8 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -13,11 +13,10 @@ var defaultRequestContentType string // Request is a wrapper for a http Request that provides convenience methods type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. 
/meetings/{id}/attendees - selectedRoute *Route + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoute *Route // is nil when no route was matched } func NewRequest(httpRequest *http.Request) *Request { @@ -32,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. // Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -49,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name @@ -114,11 +114,20 @@ func (r Request) Attribute(name string) interface{} { } // SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +// If no route was matched then return an empty string. func (r Request) SelectedRoutePath() string { + if r.selectedRoute == nil { + return "" + } + // skip creating an accessor return r.selectedRoute.Path } -// SelectedRoute return the Route that selected by the container +// SelectedRoute returns a reader to access the selected Route by the container +// Returns nil if no route was matched. func (r Request) SelectedRoute() RouteReader { + if r.selectedRoute == nil { + return nil + } return routeAccessor{route: r.selectedRoute} } diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go similarity index 99% rename from vendor/github.com/emicklei/go-restful/response.go rename to vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56aa2..a41a92cc2 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/route.go rename to vendor/github.com/emicklei/go-restful/v3/route.go index 6ac261293..ea05b3da8 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -19,6 +19,7 @@ type RouteSelectionConditionFunction func(httpRequest *http.Request) bool // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. 
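The expanded `ParameterData` above adds OpenAPI-style validation fields, exposed through new fluent setters on `Parameter`. A hedged sketch of documenting a bounded query parameter; the service path, handler, and bounds are invented for illustration, and the comment reflects the `QueryParameter` change (URL query only, no form parsing) shown earlier in this hunk:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func listItems(req *restful.Request, resp *restful.Response) {
	// QueryParameter now reads from the URL query string only.
	_ = resp.WriteAsJson(map[string]string{"pageSize": req.QueryParameter("pageSize")})
}

func newItemsService() *restful.WebService {
	ws := new(restful.WebService)
	pageSize := ws.QueryParameter("pageSize", "number of items per page").
		DataType("integer").
		DefaultValue("20").
		Minimum(1).   // new validation field in this version
		Maximum(100). // new validation field in this version
		AllowEmptyValue(false)
	ws.Route(ws.GET("/items").Param(pageSize).To(listItems))
	return ws
}

func main() {
	restful.Add(newItemsService())
	_ = http.ListenAndServe(":8080", nil)
}
```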
type Route struct { + ExtensionProperties Method string Produces []string Consumes []string @@ -163,15 +164,17 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + return strings.Split(strings.TrimLeft(path, "/"), "/") } // for debugging -func (r Route) String() string { +func (r *Route) String() string { return r.Method + " " + r.Path } // EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r Route) EnableContentEncoding(enabled bool) { +func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +var TrimRightSlashEnabled = false diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go similarity index 88% rename from vendor/github.com/emicklei/go-restful/route_builder.go rename to vendor/github.com/emicklei/go-restful/v3/route_builder.go index 1d67a4c23..827f471de 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,12 +7,13 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" "sync/atomic" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // RouteBuilder is a helper to construct Routes. @@ -38,6 +39,7 @@ type RouteBuilder struct { errorMap map[int]ResponseError defaultResponse *ResponseError metadata map[string]interface{} + extensions map[string]interface{} deprecated bool contentEncodingEnabled *bool } @@ -45,11 +47,12 @@ type RouteBuilder struct { // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. // Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -204,13 +207,22 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// AddExtension adds or updates a key=value pair to the extensions map. +func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder { + if b.extensions == nil { + b.extensions = map[string]interface{}{} + } + b.extensions[key] = value + return b +} + // Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use func (b *RouteBuilder) Deprecate() *RouteBuilder { b.deprecated = true return b } -// AllowedMethodsWithoutContentType overides the default list GET,HEAD,OPTIONS,DELETE,TRACE +// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE // If a request does not include a content-type header then // depending on the method, it may return a 415 Unsupported Media. // Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... @@ -221,6 +233,7 @@ func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *Route // ResponseError represents a response; not necessarily an error. 
type ResponseError struct { + ExtensionProperties Code int Message string Model interface{} @@ -335,12 +348,33 @@ func (b *RouteBuilder) Build() Route { contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + route.Extensions = b.extensions route.postBuild() return route } -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/emicklei/go-restful/route_reader.go b/vendor/github.com/emicklei/go-restful/v3/route_reader.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/route_reader.go rename to vendor/github.com/emicklei/go-restful/v3/route_reader.go diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/v3/router.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/router.go rename to vendor/github.com/emicklei/go-restful/v3/router.go diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/v3/service_error.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/service_error.go rename to vendor/github.com/emicklei/go-restful/v3/service_error.go diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/v3/web_service.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/web_service.go rename to vendor/github.com/emicklei/go-restful/v3/web_service.go index 2eac41497..789c4df25 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/v3/web_service.go @@ -6,7 +6,7 @@ import ( "reflect" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Copyright 2013 Ernest Micklei. All rights reserved. 
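Two behavioural knobs surface in the route-builder hunk above: per-route vendor extensions via `AddExtension`, and a pluggable `MergePathStrategy` whose default moved to `path.Join` (the stricter behaviour referenced as PRISMA-2022-0227). A sketch under invented names (the extension key, service path, and handler are illustrative only); restoring the pre-3.10 strategy is shown purely as a hedge for callers that relied on trailing-slash routes:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func handler(req *restful.Request, resp *restful.Response) {
	resp.WriteHeader(http.StatusNoContent)
}

func main() {
	// Callers that depended on the pre-3.10 trailing-slash merge behaviour
	// can opt back in; the default is now PathJoinStrategy.
	restful.MergePathStrategy = restful.TrimSlashStrategy

	ws := new(restful.WebService)
	ws.Path("/api")
	ws.Route(ws.GET("/things/").
		// Attach an OpenAPI vendor extension to this route's docs
		// (key and value invented for the example).
		AddExtension("x-example-owner", "platform-team").
		To(handler))

	restful.Add(ws)
	_ = http.ListenAndServe(":8080", nil)
}
```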
@@ -188,22 +188,20 @@ func (w *WebService) Route(builder *RouteBuilder) *WebService { // RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current++ - } - w.routes = newRoutes - return nil + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := []Route{} + for _, route := range w.routes { + if route.Method == method && route.Path == path { + continue + } + newRoutes = append(newRoutes, route) + } + w.routes = newRoutes + return nil } // Method creates a new RouteBuilder and initialize its http method diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/v3/web_service_container.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/web_service_container.go rename to vendor/github.com/emicklei/go-restful/v3/web_service_container.go diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore deleted file mode 100644 index b7ed7f956..000000000 --- a/vendor/github.com/evanphx/json-patch/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# editor and IDE paraphernalia -.idea -.vscode - -# macOS paraphernalia -.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 28e351693..000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! - -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. 
- -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... - - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. 
- -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... - combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! 
-combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/v5/LICENSE similarity index 100% rename from vendor/github.com/evanphx/json-patch/LICENSE rename to vendor/github.com/evanphx/json-patch/v5/LICENSE diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/v5/errors.go similarity index 100% rename from vendor/github.com/evanphx/json-patch/errors.go rename to vendor/github.com/evanphx/json-patch/v5/errors.go diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go similarity index 87% rename from vendor/github.com/evanphx/json-patch/merge.go rename to vendor/github.com/evanphx/json-patch/v5/merge.go index ad88d4018..a7c457342 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -27,24 +27,33 @@ func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { } func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { + for k, v := range patch.obj { if v == nil { if mergeMerge { - (*doc)[k] = nil + idx := -1 + for i, key := range doc.keys { + if key == k { + idx = i + break + } + } + if idx == -1 { + doc.keys = append(doc.keys, k) + } + doc.obj[k] = nil } else { - delete(*doc, k) + _ = doc.remove(k, &ApplyOptions{}) } } else { - cur, ok := (*doc)[k] + cur, ok := doc.obj[k] if !ok || cur == nil { if !mergeMerge { pruneNulls(v) } - - (*doc)[k] = v + _ = doc.set(k, v, &ApplyOptions{}) } else { - (*doc)[k] = merge(cur, v, mergeMerge) + _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) } } } @@ -65,9 +74,9 @@ func pruneNulls(n *lazyNode) { } func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { + for k, v := range doc.obj { if v == nil { - delete(*doc, k) + _ = doc.remove(k, &ApplyOptions{}) } else { pruneNulls(v) } @@ -91,8 +100,8 @@ func pruneAryNulls(ary *partialArray) *partialArray { return ary } -var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") 
+var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") // MergeMergePatches merges two merge patches together, such that @@ -116,20 +125,20 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { patchErr := json.Unmarshal(patchData, patch) - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONDoc + if isSyntaxError(docErr) { + return nil, errBadJSONDoc } - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONPatch + if isSyntaxError(patchErr) { + return nil, errBadJSONPatch } - if docErr == nil && *doc == nil { - return nil, ErrBadJSONDoc + if docErr == nil && doc.obj == nil { + return nil, errBadJSONDoc } - if patchErr == nil && *patch == nil { - return nil, ErrBadJSONPatch + if patchErr == nil && patch.obj == nil { + return nil, errBadJSONPatch } if docErr != nil || patchErr != nil { @@ -145,7 +154,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { patchErr = json.Unmarshal(patchData, patchAry) if patchErr != nil { - return nil, ErrBadJSONPatch + return nil, errBadJSONPatch } pruneAryNulls(patchAry) @@ -153,7 +162,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { out, patchErr := json.Marshal(patchAry) if patchErr != nil { - return nil, ErrBadJSONPatch + return nil, errBadJSONPatch } return out, nil @@ -165,6 +174,16 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return json.Marshal(doc) } +func isSyntaxError(err error) bool { + if _, ok := err.(*json.SyntaxError); ok { + return true + } + if _, ok := err.(*syntaxError); ok { + return true + } + return false +} + // resemblesJSONArray indicates whether the byte-slice "appears" to be // a JSON array or not. // False-positives are possible, as this function does not check the internal @@ -210,12 +229,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := json.Unmarshal(originalJSON, &originalDoc) if err != nil { - return nil, ErrBadJSONDoc + return nil, errBadJSONDoc } err = json.Unmarshal(modifiedJSON, &modifiedDoc) if err != nil { - return nil, ErrBadJSONDoc + return nil, errBadJSONDoc } dest, err := getDiff(originalDoc, modifiedDoc) @@ -236,17 +255,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := json.Unmarshal(originalJSON, &originalDocs) if err != nil { - return nil, ErrBadJSONDoc + return nil, errBadJSONDoc } err = json.Unmarshal(modifiedJSON, &modifiedDocs) if err != nil { - return nil, ErrBadJSONDoc + return nil, errBadJSONDoc } total := len(originalDocs) if len(modifiedDocs) != total { - return nil, ErrBadJSONDoc + return nil, errBadJSONDoc } result := []json.RawMessage{} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go similarity index 56% rename from vendor/github.com/evanphx/json-patch/patch.go rename to vendor/github.com/evanphx/json-patch/v5/patch.go index dc2b7e51e..117f2c00d 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -24,6 +24,10 @@ var ( // AccumulatedCopySizeLimit limits the total size increase in bytes caused by // "copy" operations in a patch. 
AccumulatedCopySizeLimit int64 = 0 + startObject = json.Delim('{') + endObject = json.Delim('}') + startArray = json.Delim('[') + endArray = json.Delim(']') ) var ( @@ -32,11 +36,15 @@ var ( ErrUnknownType = errors.New("unknown object type") ErrInvalid = errors.New("invalid state detected") ErrInvalidIndex = errors.New("invalid index referenced") + + rawJSONArray = []byte("[]") + rawJSONObject = []byte("{}") + rawJSONNull = []byte("null") ) type lazyNode struct { raw *json.RawMessage - doc partialDoc + doc *partialDoc ary partialArray which int } @@ -47,20 +55,58 @@ type Operation map[string]*json.RawMessage // Patch is an ordered collection of Operations. type Patch []Operation -type partialDoc map[string]*lazyNode +type partialDoc struct { + keys []string + obj map[string]*lazyNode +} + type partialArray []*lazyNode type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error + get(key string, options *ApplyOptions) (*lazyNode, error) + set(key string, val *lazyNode, options *ApplyOptions) error + add(key string, val *lazyNode, options *ApplyOptions) error + remove(key string, options *ApplyOptions) error +} + +// ApplyOptions specifies options for calls to ApplyWithOptions. +// Use NewApplyOptions to obtain default values for ApplyOptions. +type ApplyOptions struct { + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 + // AllowMissingPathOnRemove indicates whether to fail "remove" operations when the target path is missing. + // Default to false. + AllowMissingPathOnRemove bool + // EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation. + // Default to false. + EnsurePathExistsOnAdd bool +} + +// NewApplyOptions creates a default set of options for calls to ApplyWithOptions. 
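The `ApplyOptions`/`NewApplyOptions` pair introduced above carries the former package-level toggles plus the two new per-call switches. A minimal sketch of tolerating a `remove` on a missing path via `ApplyWithOptions`; the document and patch literals are invented:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name": "John"}`)
	// The second operation removes a path that does not exist in doc.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/height"}
	]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.AllowMissingPathOnRemove = true // plain Apply would return an error here

	modified, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("modified: %s\n", modified)
}
```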
+func NewApplyOptions() *ApplyOptions { + return &ApplyOptions{ + SupportNegativeIndices: SupportNegativeIndices, + AccumulatedCopySizeLimit: AccumulatedCopySizeLimit, + AllowMissingPathOnRemove: false, + EnsurePathExistsOnAdd: false, + } } func newLazyNode(raw *json.RawMessage) *lazyNode { return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} } +func newRawMessage(buf []byte) *json.RawMessage { + ra := make(json.RawMessage, len(buf)) + copy(ra, buf) + return &ra +} + func (n *lazyNode) MarshalJSON() ([]byte, error) { switch n.which { case eRaw: @@ -82,6 +128,109 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error { return nil } +func (n *partialDoc) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if _, err := buf.WriteString("{"); err != nil { + return nil, err + } + for i, k := range n.keys { + if i > 0 { + if _, err := buf.WriteString(", "); err != nil { + return nil, err + } + } + key, err := json.Marshal(k) + if err != nil { + return nil, err + } + if _, err := buf.Write(key); err != nil { + return nil, err + } + if _, err := buf.WriteString(": "); err != nil { + return nil, err + } + value, err := json.Marshal(n.obj[k]) + if err != nil { + return nil, err + } + if _, err := buf.Write(value); err != nil { + return nil, err + } + } + if _, err := buf.WriteString("}"); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type syntaxError struct { + msg string +} + +func (err *syntaxError) Error() string { + return err.msg +} + +func (n *partialDoc) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &n.obj); err != nil { + return err + } + buffer := bytes.NewBuffer(data) + d := json.NewDecoder(buffer) + if t, err := d.Token(); err != nil { + return err + } else if t != startObject { + return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)} + } + for d.More() { + k, err := d.Token() + if err != nil { + return err + } + key, ok := k.(string) + if !ok { + return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)} + } + if err := skipValue(d); err != nil { + return err + } + n.keys = append(n.keys, key) + } + return nil +} + +func skipValue(d *json.Decoder) error { + t, err := d.Token() + if err != nil { + return err + } + if t != startObject && t != startArray { + return nil + } + for d.More() { + if t == startObject { + // consume key token + if _, err := d.Token(); err != nil { + return err + } + } + if err := skipValue(d); err != nil { + return err + } + } + end, err := d.Token() + if err != nil { + return err + } + if t == startObject && end != endObject { + return &syntaxError{msg: "expected close object token"} + } + if t == startArray && end != endArray { + return &syntaxError{msg: "expected close object token"} + } + return nil +} + func deepCopy(src *lazyNode) (*lazyNode, int, error) { if src == nil { return nil, 0, nil @@ -91,14 +240,12 @@ func deepCopy(src *lazyNode) (*lazyNode, int, error) { return nil, 0, err } sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil + return newLazyNode(newRawMessage(a)), sz, nil } func (n *lazyNode) intoDoc() (*partialDoc, error) { if n.which == eDoc { - return &n.doc, nil + return n.doc, nil } if n.raw == nil { @@ -112,7 +259,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) { } n.which = eDoc - return &n.doc, nil + return n.doc, nil } func (n *lazyNode) intoAry() (*partialArray, error) { @@ -202,12 +349,12 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } - if len(n.doc) != len(o.doc) 
{ + if len(n.doc.obj) != len(o.doc.obj) { return false } - for k, v := range n.doc { - ov, ok := o.doc[k] + for k, v := range n.doc.obj { + ov, ok := o.doc.obj[k] if !ok { return false @@ -340,7 +487,7 @@ Loop: return false } -func findObject(pd *container, path string) (container, string) { +func findObject(pd *container, path string, options *ApplyOptions) (container, string) { doc := *pd split := strings.Split(path, "/") @@ -357,7 +504,7 @@ func findObject(pd *container, path string) (container, string) { for _, part := range parts { - next, ok := doc.get(decodePatchKey(part)) + next, ok := doc.get(decodePatchKey(part), options) if next == nil || ok != nil { return nil, "" @@ -381,40 +528,63 @@ func findObject(pd *container, path string) (container, string) { return doc, decodePatchKey(key) } -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val +func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error { + found := false + for _, k := range d.keys { + if k == key { + found = true + break + } + } + if !found { + d.keys = append(d.keys, key) + } + d.obj[key] = val return nil } -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil +func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error { + return d.set(key, val, options) } -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil +func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { + v, ok := d.obj[key] + if !ok { + return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) + } + return v, nil } -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] +func (d *partialDoc) remove(key string, options *ApplyOptions) error { + _, ok := d.obj[key] if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + if options.AllowMissingPathOnRemove { + return nil + } + return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key) } - - delete(*d, key) + idx := -1 + for i, k := range d.keys { + if k == key { + idx = i + break + } + } + d.keys = append(d.keys[0:idx], d.keys[idx+1:]...) + delete(d.obj, key) return nil } // set should only be used to implement the "replace" operation, so "key" must // be an already existing index in "d". 
-func (d *partialArray) set(key string, val *lazyNode) error { +func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) error { idx, err := strconv.Atoi(key) if err != nil { return err } if idx < 0 { - if !SupportNegativeIndices { + if !options.SupportNegativeIndices { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } if idx < -len(*d) { @@ -427,7 +597,7 @@ func (d *partialArray) set(key string, val *lazyNode) error { return nil } -func (d *partialArray) add(key string, val *lazyNode) error { +func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error { if key == "-" { *d = append(*d, val) return nil @@ -449,7 +619,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { } if idx < 0 { - if !SupportNegativeIndices { + if !options.SupportNegativeIndices { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } if idx < -len(ary) { @@ -466,7 +636,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { return nil } -func (d *partialArray) get(key string) (*lazyNode, error) { +func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) { idx, err := strconv.Atoi(key) if err != nil { @@ -474,7 +644,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) { } if idx < 0 { - if !SupportNegativeIndices { + if !options.SupportNegativeIndices { return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } if idx < -len(*d) { @@ -490,7 +660,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) { return (*d)[idx], nil } -func (d *partialArray) remove(key string) error { +func (d *partialArray) remove(key string, options *ApplyOptions) error { idx, err := strconv.Atoi(key) if err != nil { return err @@ -499,14 +669,20 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { + if options.AllowMissingPathOnRemove { + return nil + } return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } if idx < 0 { - if !SupportNegativeIndices { + if !options.SupportNegativeIndices { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } if idx < -len(cur) { + if options.AllowMissingPathOnRemove { + return nil + } return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } idx += len(cur) @@ -519,22 +695,29 @@ func (d *partialArray) remove(key string) error { *d = ary return nil - } -func (p Patch) add(doc *container, op Operation) error { +func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { return errors.Wrapf(ErrMissing, "add operation failed to decode path") } - con, key := findObject(doc, path) + if options.EnsurePathExistsOnAdd { + err = ensurePathExists(doc, path, options) + + if err != nil { + return err + } + } + + con, key := findObject(doc, path, options) if con == nil { return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) } - err = con.add(key, op.value()) + err = con.add(key, op.value(), options) if err != nil { return errors.Wrapf(err, "error in add for path: '%s'", path) } @@ -542,19 +725,113 @@ func (p Patch) add(doc *container, op Operation) error { return nil } -func (p Patch) remove(doc *container, op Operation) error { +// Given a document and a path to a key, walk the path and create all missing elements +// creating objects and arrays as needed. 
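The `ensurePathExists` walk shown in this hunk backs the `EnsurePathExistsOnAdd` option: it creates intermediate objects and arrays (padding arrays with nulls) before the `add` lands. A hedged sketch with an invented document and path:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{}`)
	// "/spec/containers/0/name" does not exist yet; the intermediate object
	// and array are created when EnsurePathExistsOnAdd is enabled.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add", "path": "/spec/containers/0/name", "value": "app"}
	]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EnsurePathExistsOnAdd = true

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("out: %s\n", out)
}
```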
+func ensurePathExists(pd *container, path string, options *ApplyOptions) error { + doc := *pd + + var err error + var arrIndex int + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil + } + + parts := split[1:] + + for pi, part := range parts { + + // Have we reached the key part of the path? + // If yes, we're done. + if pi == len(parts)-1 { + return nil + } + + target, ok := doc.get(decodePatchKey(part), options) + + if target == nil || ok != nil { + + // If the current container is an array which has fewer elements than our target index, + // pad the current container with nulls. + if arrIndex, err = strconv.Atoi(part); err == nil { + pa, ok := doc.(*partialArray) + + if ok && arrIndex >= len(*pa)+1 { + // Pad the array with null values up to the required index. + for i := len(*pa); i <= arrIndex-1; i++ { + doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options) + } + } + } + + // Check if the next part is a numeric index or "-". + // If yes, then create an array, otherwise, create an object. + if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil || parts[pi+1] == "-" { + if arrIndex < 0 { + + if !options.SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex) + } + + if arrIndex < -1 { + return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex) + } + + arrIndex = 0 + } + + newNode := newLazyNode(newRawMessage(rawJSONArray)) + doc.add(part, newNode, options) + doc, _ = newNode.intoAry() + + // Pad the new array with null values up to the required index. + for i := 0; i < arrIndex; i++ { + doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options) + } + } else { + newNode := newLazyNode(newRawMessage(rawJSONObject)) + + doc.add(part, newNode, options) + doc, _ = newNode.intoDoc() + } + } else { + if isArray(*target.raw) { + doc, err = target.intoAry() + + if err != nil { + return err + } + } else { + doc, err = target.intoDoc() + + if err != nil { + return err + } + } + } + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { return errors.Wrapf(ErrMissing, "remove operation failed to decode path") } - con, key := findObject(doc, path) + con, key := findObject(doc, path, options) if con == nil { + if options.AllowMissingPathOnRemove { + return nil + } return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) } - err = con.remove(key) + err = con.remove(key, options) if err != nil { return errors.Wrapf(err, "error in remove for path: '%s'", path) } @@ -562,7 +839,7 @@ func (p Patch) remove(doc *container, op Operation) error { return nil } -func (p Patch) replace(doc *container, op Operation) error { +func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { return errors.Wrapf(err, "replace operation failed to decode path") @@ -583,7 +860,7 @@ func (p Patch) replace(doc *container, op Operation) error { case eAry: *doc = &val.ary case eDoc: - *doc = &val.doc + *doc = val.doc case eRaw: return errors.Wrapf(err, "replace operation hit impossible case") } @@ -591,18 +868,18 @@ func (p Patch) replace(doc *container, op Operation) error { return nil } - con, key := findObject(doc, path) + con, key := findObject(doc, path, options) if con == nil { return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is 
missing path: %s", path) } - _, ok := con.get(key) + _, ok := con.get(key, options) if ok != nil { return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) } - err = con.set(key, op.value()) + err = con.set(key, op.value(), options) if err != nil { return errors.Wrapf(err, "error in remove for path: '%s'", path) } @@ -610,24 +887,24 @@ func (p Patch) replace(doc *container, op Operation) error { return nil } -func (p Patch) move(doc *container, op Operation) error { +func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error { from, err := op.From() if err != nil { return errors.Wrapf(err, "move operation failed to decode from") } - con, key := findObject(doc, from) + con, key := findObject(doc, from, options) if con == nil { return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) } - val, err := con.get(key) + val, err := con.get(key, options) if err != nil { return errors.Wrapf(err, "error in move for path: '%s'", key) } - err = con.remove(key) + err = con.remove(key, options) if err != nil { return errors.Wrapf(err, "error in move for path: '%s'", key) } @@ -637,13 +914,13 @@ func (p Patch) move(doc *container, op Operation) error { return errors.Wrapf(err, "move operation failed to decode path") } - con, key = findObject(doc, path) + con, key = findObject(doc, path, options) if con == nil { return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) } - err = con.add(key, val) + err = con.add(key, val, options) if err != nil { return errors.Wrapf(err, "error in move for path: '%s'", path) } @@ -651,7 +928,7 @@ func (p Patch) move(doc *container, op Operation) error { return nil } -func (p Patch) test(doc *container, op Operation) error { +func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { return errors.Wrapf(err, "test operation failed to decode path") @@ -662,7 +939,7 @@ func (p Patch) test(doc *container, op Operation) error { switch sv := (*doc).(type) { case *partialDoc: - self.doc = *sv + self.doc = sv self.which = eDoc case *partialArray: self.ary = *sv @@ -676,14 +953,14 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } - con, key := findObject(doc, path) + con, key := findObject(doc, path, options) if con == nil { return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) } - val, err := con.get(key) - if err != nil { + val, err := con.get(key, options) + if err != nil && errors.Cause(err) != ErrMissing { return errors.Wrapf(err, "error in test for path: '%s'", path) } @@ -703,19 +980,19 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error { from, err := op.From() if err != nil { return errors.Wrapf(err, "copy operation failed to decode from") } - con, key := findObject(doc, from) + con, key := findObject(doc, from, options) if con == nil { return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) } - val, err := con.get(key) + val, err := con.get(key, options) if err != nil { return errors.Wrapf(err, "error in copy for from: '%s'", 
from) } @@ -725,7 +1002,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er return errors.Wrapf(ErrMissing, "copy operation failed to decode path") } - con, key = findObject(doc, path) + con, key = findObject(doc, path, options) if con == nil { return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) @@ -737,11 +1014,11 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er } (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + if options.AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > options.AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(options.AccumulatedCopySizeLimit, *accumulatedCopySize) } - err = con.add(key, valCopy) + err = con.add(key, valCopy, options) if err != nil { return errors.Wrapf(err, "error while adding value during copy") } @@ -751,13 +1028,8 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er // Equal indicates if 2 JSON documents have the same structural equality. func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) + la := newLazyNode(newRawMessage(a)) + lb := newLazyNode(newRawMessage(b)) return la.equal(lb) } @@ -778,12 +1050,24 @@ func DecodePatch(buf []byte) (Patch, error) { // Apply mutates a JSON document according to the patch, and returns the new // document. func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") + return p.ApplyWithOptions(doc, NewApplyOptions()) +} + +// ApplyWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. +// It returns the new document. +func (p Patch) ApplyWithOptions(doc []byte, options *ApplyOptions) ([]byte, error) { + return p.ApplyIndentWithOptions(doc, "", options) } // ApplyIndent mutates a JSON document according to the patch, and returns the new // document indented. func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + return p.ApplyIndentWithOptions(doc, indent, NewApplyOptions()) +} + +// ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. +// It returns the new document indented. 
+func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) { if len(doc) == 0 { return doc, nil } @@ -808,17 +1092,17 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { for _, op := range p { switch op.Kind() { case "add": - err = p.add(&pd, op) + err = p.add(&pd, op, options) case "remove": - err = p.remove(&pd, op) + err = p.remove(&pd, op, options) case "replace": - err = p.replace(&pd, op) + err = p.replace(&pd, op, options) case "move": - err = p.move(&pd, op) + err = p.move(&pd, op, options) case "test": - err = p.test(&pd, op) + err = p.test(&pd, op, options) case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) + err = p.copy(&pd, op, &accumulatedCopySize, options) default: err = fmt.Errorf("Unexpected kind: %s", op.Kind()) } diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf4..1d89d85ce 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef..000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index a438fe4b4..77f9593bd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,9 +7,116 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. 
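The options-based API threaded through the json-patch v5 hunks above (ApplyWithOptions, NewApplyOptions, and the per-operation option checks) can be exercised roughly as in the sketch below. This is a minimal illustration, not code from this repository: the document, patch operations, and paths are made up, and the exact output shape depends on the library's encoding.

```go
// Sketch: applying a patch with the v5 ApplyOptions shown in the hunks above.
// The JSON literals and paths are illustrative only.
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"spec":{"containers":[{"name":"app"}]}}`)

	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add", "path": "/spec/labels/tier", "value": "backend"},
		{"op": "remove", "path": "/metadata/annotations"}
	]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EnsurePathExistsOnAdd = true    // create the missing /spec/labels object on the fly
	opts.AllowMissingPathOnRemove = true // tolerate the absent /metadata container
	opts.SupportNegativeIndices = true   // keep the pre-v5 behaviour for "-1"-style indices

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

With the default NewApplyOptions() both operations above would fail (missing intermediate object, missing remove target), which is why the options are set explicitly here.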
+ +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + ## [1.5.1] - 2021-08-24 -* Revert Add AddRaw to not follow symlinks +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) ## [1.5.0] - 2021-08-20 @@ -22,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 828a60b24..ea379759d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,77 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. 
+- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. 
- -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb0..fb03ade75 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index df57b1b28..d4e6080fe 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,130 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. 
- -## API stability +--- -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). +Platform support: -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. +Linux and macOS should include Android and iOS, but these are currently untested. -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## Example +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). 
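Since watches are non-recursive, a caller that wants events for a whole tree has to add each directory itself. A minimal sketch of that, using only the Watcher API shown above plus filepath.WalkDir from the standard library; the /tmp root is illustrative, and directories created after the walk would still need to be added as they appear:

```go
// Sketch: watch every directory under root, since fsnotify watches are
// non-recursive (a recursive watcher is tracked in #18).
package main

import (
	"io/fs"
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchTree adds a watch for root and every subdirectory below it.
func watchTree(w *fsnotify.Watcher, root string) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return w.Add(path)
		}
		return nil
	})
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := watchTree(w, "/tmp"); err != nil {
		log.Fatal(err)
	}

	// Read both channels in one loop; returns when the watcher is closed.
	for {
		select {
		case event, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", event)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```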
-## FAQ +[#18]: https://github.com/fsnotify/fsnotify/issues/18 -**When a file is moved to another directory is it still being watched?** +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). -No (it shouldn't be, unless you are watching where it was moved to). +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. -**When I watch a directory, are all subdirectories watched as well?** +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). +[#9]: https://github.com/fsnotify/fsnotify/issues/9 -**Do I have to watch the Error and Event channels in a separate goroutine?** +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE -**Why am I receiving multiple events for the same file on OS X?** +This is the event that inotify sends, so not much can be changed about this. -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". -**How many files can be watched at once?** +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +To increase them you can use `sysctl` or write the value to proc file: -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
+To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +Reaching the limit will result in a "no space left on device" or "too many open +files" error. -## Related Projects +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 000000000..1a95ad8e7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. 
+// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. 
+// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 000000000..54c77fbb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. 
+// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. 
+func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... 
+ for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 000000000..29087469b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. 
Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). 
+ watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. 
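// A minimal usage sketch of the watcher constructed above: create it, Add one
// directory, and drain the Events and Errors channels until they are closed
// by Close(). "/tmp/dir" is only a placeholder path.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/dir"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // Events is closed once the watcher shuts down.
			}
			if ev.Has(fsnotify.Create) || ev.Has(fsnotify.Write) {
				log.Println("changed:", ev)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}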
+// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). 
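// Sketch of the "watch the parent directory" advice from the comment above:
// to follow one file that may be replaced atomically, watch its directory and
// filter events by Event.Name. The config path below is hypothetical.
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	const cfg = "/etc/myapp/config.yaml" // hypothetical file to follow

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add(filepath.Dir(cfg)); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		// Ignore everything in the directory except the file we care about.
		if filepath.Clean(ev.Name) != filepath.Clean(cfg) {
			continue
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("config updated:", ev.Op)
		}
	}
}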
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. 
+// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 000000000..a9bb1c3c4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. 
+// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 000000000..ae392867c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. 
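// Rough sketch of deduplicating bursts of Write events, in the spirit of the
// cmd/fsnotify dedup example referenced in the comment above. The 100ms
// settle window is arbitrary, and the map handling is simplified (not tuned
// for very heavy churn).
package main

import (
	"log"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp/dir"); err != nil { // placeholder path
		log.Fatal(err)
	}

	var (
		mu     sync.Mutex
		timers = map[string]*time.Timer{}
	)
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		name := ev.Name
		mu.Lock()
		if t, ok := timers[name]; ok {
			t.Reset(100 * time.Millisecond) // still being written; wait longer
		} else {
			timers[name] = time.AfterFunc(100*time.Millisecond, func() {
				mu.Lock()
				delete(timers, name)
				mu.Unlock()
				log.Println("write settled:", name)
			})
		}
		mu.Unlock()
	}
}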
+ Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. 
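// Sketch of Remove and WatchList as documented above: add two placeholder
// paths, list them, remove one, and observe ErrNonExistentWatch on a second
// removal (the error is wrapped, so errors.Is applies).
package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	for _, p := range []string{"/tmp/dir", "/tmp/other"} { // placeholder paths
		if err := w.Add(p); err != nil {
			log.Fatal(err)
		}
	}
	log.Println("watching:", w.WatchList())

	if err := w.Remove("/tmp/dir"); err != nil {
		log.Fatal(err)
	}
	if err := w.Remove("/tmp/dir"); errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Println("second removal:", err)
	}
}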
+// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". +// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + 
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
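// Generic sketch of the concurrency pattern behind the "Must run within the
// I/O thread" comments: one goroutine owns the watch state, and other
// goroutines submit requests over a channel and wait on a per-request reply
// channel, much like Add/Remove do with w.input above. All names here are
// hypothetical.
package main

import "fmt"

type request struct {
	path  string
	reply chan error
}

func main() {
	input := make(chan request)
	done := make(chan struct{})

	// The single "I/O thread": the only goroutine that touches `watched`.
	go func() {
		defer close(done)
		watched := map[string]struct{}{}
		for req := range input {
			watched[req.path] = struct{}{}
			req.reply <- nil
			fmt.Println("paths watched:", len(watched))
		}
	}()

	req := request{path: "/tmp/dir", reply: make(chan error)}
	input <- req
	if err := <-req.reply; err != nil {
		fmt.Println("add failed:", err)
	}

	close(input)
	<-done
}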
+func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. + old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f5..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e8..30a5bf0f0 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,29 +1,37 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build !plan9 // +build !plan9 -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. package fsnotify import ( - "bytes" "errors" "fmt" + "strings" ) -// Event represents a single file system notification. +// Event represents a file system notification. type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. 
+ // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( Create Op = 1 << iota Write @@ -32,38 +40,42 @@ const ( Chmod ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) - if op&Create == Create { - buffer.WriteString("|CREATE") +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") + if op.Has(Remove) { + b.WriteString("|REMOVE") } - if op&Write == Write { - buffer.WriteString("|WRITE") + if op.Has(Write) { + b.WriteString("|WRITE") } - if op&Rename == Rename { - buffer.WriteString("|RENAME") + if op.Has(Rename) { + b.WriteString("|RENAME") } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") + if op.Has(Chmod) { + b.WriteString("|CHMOD") } - if buffer.Len() == 0 { - return "" + if b.Len() == 0 { + return "[no events]" } - return buffer.String()[1:] // Strip leading pipe + return b.String()[1:] } -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index eb87699b5..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
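// Tiny illustration of Op being a bitmask, using the Has and String helpers
// added in fsnotify.go above. The printed forms follow the String()
// implementations shown there.
package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	op := fsnotify.Create | fsnotify.Write

	fmt.Println(op)                     // CREATE|WRITE
	fmt.Println(op.Has(fsnotify.Write)) // true
	fmt.Println(op.Has(fsnotify.Chmod)) // false

	ev := fsnotify.Event{Name: "/tmp/dir/file", Op: op}
	fmt.Println(ev) // CREATE|WRITE  "/tmp/dir/file"
}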
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index e9ff9439f..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 368f5b790..000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 000000000..b09ef7683 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845b..4322b0b88 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476f..5da5ffa78 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index c02b75f7c..000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e 
!= nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md index a0931951d..806779465 100644 --- a/vendor/github.com/go-kit/log/README.md +++ b/vendor/github.com/go-kit/log/README.md @@ -1,5 +1,10 @@ # package log +[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) +[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) +[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) +[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) + `package log` provides a minimal interface for structured logging in services. It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. It can be used for both typical application log events, diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go index 0cedbf824..d0faed4f0 100644 --- a/vendor/github.com/go-kit/log/json_logger.go +++ b/vendor/github.com/go-kit/log/json_logger.go @@ -68,7 +68,7 @@ func safeString(str fmt.Stringer) (s string) { if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { s = "NULL" } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in String method: %v", panicVal) } } }() @@ -82,7 +82,7 @@ func safeError(err error) (s interface{}) { if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { s = nil } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in Error method: %v", panicVal) } } }() diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go index 505d307b1..fd681dcf9 100644 --- a/vendor/github.com/go-kit/log/level/doc.go +++ b/vendor/github.com/go-kit/log/level/doc.go @@ -7,6 +7,17 @@ // logger = level.NewFilter(logger, level.AllowInfo()) // <-- // logger = log.With(logger, "ts", log.DefaultTimestampUTC) // +// It's also possible to configure log level from a string. For instance from +// a flag, environment variable or configuration file. 
+// +// fs := flag.NewFlagSet("myprogram") +// lvl := fs.String("log", "info", "debug, info, warn, error") +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// // Then, at the callsites, use one of the level.Debug, Info, Warn, or Error // helper methods to emit leveled log events. // diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go index c94756c6b..c641d9855 100644 --- a/vendor/github.com/go-kit/log/level/level.go +++ b/vendor/github.com/go-kit/log/level/level.go @@ -1,6 +1,14 @@ package level -import "github.com/go-kit/log" +import ( + "errors" + "strings" + + "github.com/go-kit/log" +) + +// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. +var ErrInvalidLevelString = errors.New("invalid level string") // Error returns a logger that includes a Key/ErrorValue pair. func Error(logger log.Logger) log.Logger { @@ -66,6 +74,22 @@ func (l *logger) Log(keyvals ...interface{}) error { // Option sets a parameter for the leveled logger. type Option func(*logger) +// Allow the provided log level to pass. +func Allow(v Value) Option { + switch v { + case debugValue: + return AllowDebug() + case infoValue: + return AllowInfo() + case warnValue: + return AllowWarn() + case errorValue: + return AllowError() + default: + return AllowNone() + } +} + // AllowAll is an alias for AllowDebug. func AllowAll() Option { return AllowDebug() @@ -100,6 +124,33 @@ func allowed(allowed level) Option { return func(l *logger) { l.allowed = allowed } } +// Parse a string to its corresponding level value. Valid strings are "debug", +// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and +// strings.ToLower. +func Parse(level string) (Value, error) { + switch strings.TrimSpace(strings.ToLower(level)) { + case debugValue.name: + return debugValue, nil + case infoValue.name: + return infoValue, nil + case warnValue.name: + return warnValue, nil + case errorValue.name: + return errorValue, nil + default: + return nil, ErrInvalidLevelString + } +} + +// ParseDefault calls Parse and returns the default Value on error. +func ParseDefault(level string, def Value) Value { + v, err := Parse(level) + if err != nil { + return def + } + return v +} + // ErrNotAllowed sets the error to return from Log when it squelches a log // event disallowed by the configured Allow[Level] option. 
By default, // ErrNotAllowed is nil; in this case the log event is squelched with no diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf new file mode 100644 index 000000000..528438b97 --- /dev/null +++ b/vendor/github.com/go-kit/log/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all"] diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c3b56b3d2..e027aea3f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,16 +30,20 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional @@ -47,7 +51,7 @@ limitations under the License. // always logged, regardless of the current verbosity. 
If there is no error // instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -58,20 +62,22 @@ limitations under the License. // Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -82,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -124,15 +132,15 @@ limitations under the License. // around. For cases where passing a logger is optional, a pointer to Logger // should be used. 
// -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -141,51 +149,54 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... 
+// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and @@ -201,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -244,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -254,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). 
func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -442,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -467,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe06..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
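Aside, as a rough sketch only: the go-logr/logr hunks above change Discard() to New(nil), add IsZero(), and guard every Logger method against a nil sink. The snippet below illustrates that behaviour under the assumption that only the exported API visible in this diff is used; it is an illustration, not code carried by this patch.

    package main

    import (
        "errors"

        "github.com/go-logr/logr"
    )

    func main() {
        // A zero-value Logger has no sink; with the nil-sink guards added
        // above, Info/Error/V/With* should be safe no-ops rather than panics.
        var log logr.Logger
        log.Info("dropped", "key", "value")
        log.Error(errors.New("boom"), "also dropped")
        log.V(2).WithName("sub").Info("still a no-op")

        // Discard() is now simply New(nil); IsZero reports the missing sink.
        if logr.Discard().IsZero() {
            // logging is effectively disabled here
        }
    }

This mirrors the guards added to Info, Error, V, WithValues, WithName and friends in the vendored logr.go, so callers no longer need to special-case an unset logger.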
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381aee5..013fc1943 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b9..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 000000000..f0610cf1e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,69 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. 
+func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 3bc0a6e26..cfdef03e5 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -30,8 +30,8 @@ import ( "net/url" "strings" - "github.com/PuerkitoBio/purell" "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" ) const ( @@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error { return err } - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + internal.NormalizeURL(parsed) + + r.referenceURL = parsed refURL := r.referenceURL if refURL.Scheme != "" && refURL.Host != "" { diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 000000000..49ad52766 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. 
+*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 813c47aa6..bf503e400 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -37,3 +37,18 @@ linters: - gci - gocognit - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a8872..000000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c501..55094cb74 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 000000000..16accc55f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 9a6040972..00038c377 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, 
error) { return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d31..f5228b82c 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8bc..7c7da9c08 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f3393..2757d9b95 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d1..0565db377 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 193702f2c..f78ab684a 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index ec9691440..f09ee609f 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,126 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := 
yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +215,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +390,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/vendor/github.com/google/gnostic/LICENSE b/vendor/github.com/google/gnostic-models/LICENSE similarity index 100% rename from vendor/github.com/google/gnostic/LICENSE rename to vendor/github.com/google/gnostic-models/LICENSE diff --git a/vendor/github.com/google/gnostic/compiler/README.md b/vendor/github.com/google/gnostic-models/compiler/README.md similarity index 100% rename from vendor/github.com/google/gnostic/compiler/README.md rename to vendor/github.com/google/gnostic-models/compiler/README.md diff --git a/vendor/github.com/google/gnostic/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go similarity index 100% rename from vendor/github.com/google/gnostic/compiler/context.go rename to vendor/github.com/google/gnostic-models/compiler/context.go diff --git a/vendor/github.com/google/gnostic/compiler/error.go b/vendor/github.com/google/gnostic-models/compiler/error.go similarity index 100% rename from vendor/github.com/google/gnostic/compiler/error.go rename to vendor/github.com/google/gnostic-models/compiler/error.go 
diff --git a/vendor/github.com/google/gnostic/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go similarity index 97% rename from vendor/github.com/google/gnostic/compiler/extensions.go rename to vendor/github.com/google/gnostic-models/compiler/extensions.go index 5b5a916d2..250c81e8c 100644 --- a/vendor/github.com/google/gnostic/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -24,7 +24,7 @@ import ( "github.com/golang/protobuf/ptypes/any" yaml "gopkg.in/yaml.v3" - extensions "github.com/google/gnostic/extensions" + extensions "github.com/google/gnostic-models/extensions" ) // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. diff --git a/vendor/github.com/google/gnostic/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go similarity index 99% rename from vendor/github.com/google/gnostic/compiler/helpers.go rename to vendor/github.com/google/gnostic-models/compiler/helpers.go index 97ffaa513..975d65e8f 100644 --- a/vendor/github.com/google/gnostic/compiler/helpers.go +++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -22,7 +22,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/jsonschema" + "github.com/google/gnostic-models/jsonschema" ) // compiler helper functions, usually called from generated code diff --git a/vendor/github.com/google/gnostic/compiler/main.go b/vendor/github.com/google/gnostic-models/compiler/main.go similarity index 100% rename from vendor/github.com/google/gnostic/compiler/main.go rename to vendor/github.com/google/gnostic-models/compiler/main.go diff --git a/vendor/github.com/google/gnostic/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go similarity index 100% rename from vendor/github.com/google/gnostic/compiler/reader.go rename to vendor/github.com/google/gnostic-models/compiler/reader.go diff --git a/vendor/github.com/google/gnostic/extensions/README.md b/vendor/github.com/google/gnostic-models/extensions/README.md similarity index 100% rename from vendor/github.com/google/gnostic/extensions/README.md rename to vendor/github.com/google/gnostic-models/extensions/README.md diff --git a/vendor/github.com/google/gnostic/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go similarity index 99% rename from vendor/github.com/google/gnostic/extensions/extension.pb.go rename to vendor/github.com/google/gnostic-models/extensions/extension.pb.go index a6a4ccca6..a71df8abe 100644 --- a/vendor/github.com/google/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: extensions/extension.proto package gnostic_extension_v1 diff --git a/vendor/github.com/google/gnostic/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto similarity index 100% rename from vendor/github.com/google/gnostic/extensions/extension.proto rename to vendor/github.com/google/gnostic-models/extensions/extension.proto diff --git a/vendor/github.com/google/gnostic/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go similarity index 100% rename from vendor/github.com/google/gnostic/extensions/extensions.go rename to vendor/github.com/google/gnostic-models/extensions/extensions.go diff --git a/vendor/github.com/google/gnostic/jsonschema/README.md b/vendor/github.com/google/gnostic-models/jsonschema/README.md similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/README.md rename to vendor/github.com/google/gnostic-models/jsonschema/README.md diff --git a/vendor/github.com/google/gnostic/jsonschema/base.go b/vendor/github.com/google/gnostic-models/jsonschema/base.go similarity index 90% rename from vendor/github.com/google/gnostic/jsonschema/base.go rename to vendor/github.com/google/gnostic-models/jsonschema/base.go index 0af8b148b..5fcc4885a 100644 --- a/vendor/github.com/google/gnostic/jsonschema/base.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/base.go @@ -1,3 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // THIS FILE IS AUTOMATICALLY GENERATED. 
@@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic-models/jsonschema/display.go similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/display.go rename to vendor/github.com/google/gnostic-models/jsonschema/display.go diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/models.go rename to vendor/github.com/google/gnostic-models/jsonschema/models.go diff --git a/vendor/github.com/google/gnostic/jsonschema/operations.go b/vendor/github.com/google/gnostic-models/jsonschema/operations.go similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/operations.go rename to vendor/github.com/google/gnostic-models/jsonschema/operations.go diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/reader.go rename to vendor/github.com/google/gnostic-models/jsonschema/reader.go diff --git a/vendor/github.com/google/gnostic/jsonschema/schema.json b/vendor/github.com/google/gnostic-models/jsonschema/schema.json similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/schema.json rename to vendor/github.com/google/gnostic-models/jsonschema/schema.json diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go similarity index 100% rename from vendor/github.com/google/gnostic/jsonschema/writer.go rename to vendor/github.com/google/gnostic-models/jsonschema/writer.go diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index 0f1790766..d71fe6d54 100644 --- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go index 06b60157c..65c4c913c 100644 --- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv2/OpenAPIv2.proto package openapi_v2 diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto similarity index 100% rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto diff --git a/vendor/github.com/google/gnostic/openapiv2/README.md b/vendor/github.com/google/gnostic-models/openapiv2/README.md similarity index 100% rename from vendor/github.com/google/gnostic/openapiv2/README.md rename to vendor/github.com/google/gnostic-models/openapiv2/README.md diff --git a/vendor/github.com/google/gnostic/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go similarity index 96% rename from vendor/github.com/google/gnostic/openapiv2/document.go rename to vendor/github.com/google/gnostic-models/openapiv2/document.go index 0021ae871..e96ac0d6d 100644 --- a/vendor/github.com/google/gnostic/openapiv2/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -17,7 +17,7 @@ package openapi_v2 import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. diff --git a/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json similarity index 100% rename from vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json rename to vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index 5f4a7025e..4b1131ce1 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go index 499e7f932..945b8d11f 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv3/OpenAPIv3.proto package openapi_v3 diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto similarity index 100% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic-models/openapiv3/README.md similarity index 100% rename from vendor/github.com/google/gnostic/openapiv3/README.md rename to vendor/github.com/google/gnostic-models/openapiv3/README.md diff --git a/vendor/github.com/google/gnostic/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go similarity index 96% rename from vendor/github.com/google/gnostic/openapiv3/document.go rename to vendor/github.com/google/gnostic-models/openapiv3/document.go index ef10d1d90..1cee46773 100644 --- a/vendor/github.com/google/gnostic/openapiv3/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -17,7 +17,7 @@ package openapi_v3 import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json deleted file mode 100644 index d5caed162..000000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json +++ /dev/null @@ -1,1251 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anysOrExpressions" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": false, - "patternProperties": { - "^[a-zA-Z0-9\\.\\-_]+$": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "anysOrExpressions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/anyOrExpression" - } - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "object": { - "type": "object", - "additionalProperties": 
true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json deleted file mode 100644 index ed0b83adf..000000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json +++ /dev/null @@ -1,1250 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - }, - "summary": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anyOrExpression" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. 
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": 
"string" - } - }, - "object": { - "type": "object", - "additionalProperties": true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99f..061d72ae0 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1a..97c1b34fd 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457..b503aae7d 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 000000000..5bb365949 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. + if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f938..761520a8c 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. 
Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters. @@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). 
+func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). +// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 000000000..0a1ff9f94 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. 
There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? 
+ +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. 
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce..4f0287498 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,6 +1,5 @@ # Mergo - [![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] @@ -8,8 +7,8 @@ [![Coverage Status][9]][10] [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -25,8 +24,10 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,11 +37,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. @@ -51,9 +52,8 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild @@ -98,6 +98,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +170,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +220,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,18 +228,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 000000000..a5de61f77 --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46..b50d5c2a4 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd9..0ef9b2138 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,10 +77,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, 
!config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
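
As an illustrative aside, not part of the vendored diff: the new `WithoutDereference` option above stops mergo from looking through pointers when deciding whether a destination field is empty, so a non-nil pointer is kept even if it points at a zero value. A minimal sketch of how a caller might opt in, assuming the `mergo.Merge(dst, src, opts...)` entry point and the `WithoutDereference` option shown in this hunk; the `Settings` struct and its values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Settings is a hypothetical type used only for illustration.
type Settings struct {
	Retries *int   // pointer field: affected by WithoutDereference
	Name    string // plain field: merged as usual
}

func main() {
	zero, three := 0, 3
	dst := Settings{Retries: &zero}                  // non-nil pointer to a zero value
	src := Settings{Retries: &three, Name: "default"}

	// With WithoutDereference, dst.Retries counts as non-empty because the
	// pointer itself is non-nil, so it is left untouched; only the genuinely
	// empty Name field is filled in from src.
	if err := mergo.Merge(&dst, src, mergo.WithoutDereference); err != nil {
		panic(err)
	}
	fmt.Println(*dst.Retries, dst.Name) // 0 default
}
```

Without the option, the same call would dereference `dst.Retries`, see the zero value behind it, and merge in `3` from `src`; with it, the pointer is shared or kept as-is per the pointer branch added above.
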
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f..0a721e2d8 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,10 +17,10 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: @@ -65,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65a..b5f5e2613 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. 
func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/moby/spdystream/CONTRIBUTING.md b/vendor/github.com/moby/spdystream/CONTRIBUTING.md new file mode 100644 index 000000000..d4eddcc53 --- /dev/null +++ b/vendor/github.com/moby/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/moby/spdystream/LICENSE b/vendor/github.com/moby/spdystream/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/moby/spdystream/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/spdystream/MAINTAINERS b/vendor/github.com/moby/spdystream/MAINTAINERS new file mode 100644 index 000000000..26e5ec828 --- /dev/null +++ b/vendor/github.com/moby/spdystream/MAINTAINERS @@ -0,0 +1,40 @@ +# Spdystream maintainers file +# +# This file describes who runs the moby/spdystream project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "adisky", + "dims", + "dmcgowan", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.adisky] + Name = "Aditi Sharma" + Email = "adi.sky17@gmail.com" + GitHub = "adisky" + + [people.dims] + Name = "Davanum Srinivas" + Email = "davanum@gmail.com" + GitHub = "dims" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcg.dev" + GitHub = "dmcgowan" diff --git a/vendor/github.com/moby/spdystream/NOTICE b/vendor/github.com/moby/spdystream/NOTICE new file mode 100644 index 000000000..b9b11c9ab --- /dev/null +++ b/vendor/github.com/moby/spdystream/NOTICE @@ -0,0 +1,5 @@ +SpdyStream +Copyright 2014-2021 Docker Inc. + +This product includes software developed at +Docker Inc. (https://www.docker.com/). diff --git a/vendor/github.com/moby/spdystream/README.md b/vendor/github.com/moby/spdystream/README.md new file mode 100644 index 000000000..b84e98343 --- /dev/null +++ b/vendor/github.com/moby/spdystream/README.md @@ -0,0 +1,77 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/moby/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/moby/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Copyright 2013-2021 Docker, inc. Released under the [Apache 2.0 license](LICENSE). diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go new file mode 100644 index 000000000..d906bb05c --- /dev/null +++ b/vendor/github.com/moby/spdystream/connection.go @@ -0,0 +1,972 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/moby/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occurred") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutLock sync.Mutex + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about + // the same time the connection is being closed + setTimeoutChan: make(chan time.Duration, 1), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + setTimeoutChan = i.setTimeoutChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/moby/spdystream/issues/49 for more details. + go func() { + for range resetChan { + } + }() + + go func() { + for range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. 
This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/moby/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. +func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Since(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. 
+ var wg sync.WaitGroup + + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + wg.Add(1) + go func(frameQueue *PriorityFrameQueue) { + // let the WaitGroup know this worker is done + defer wg.Done() + + s.frameHandler(frameQueue, newHandler) + }(frameQueues[i]) + } + + var ( + partitionRoundRobin int + goAwayFrame *spdy.GoAwayFrame + ) +Loop: + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + debugMessage("frame read error: %s", err) + } else { + debugMessage("(%p) EOF received", s) + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + // hold on to the go away frame and exit the loop + goAwayFrame = frame + break Loop + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + // wait for all frame handler workers to indicate they've drained their queues + // before handling the go away frame + wg.Wait() + + if goAwayFrame != nil { + s.handleGoAwayFrame(goAwayFrame) + } + + // now it's safe to close remote channels and empty s.streams + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.dataFrameHandler(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + 
default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + debugMessage("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + priority: frame.Priority, + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + debugMessage("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) 
+ select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. 
+ s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + debugMessage("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + err := s.framer.WriteFrame(goAwayFrame) + go s.shutdown(s.closeTimeout) + if err != nil { + return err + } + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. +func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. The last stream to be +// received by the remote will be sent on the channel. 
The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + 
s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/vendor/github.com/moby/spdystream/handlers.go b/vendor/github.com/moby/spdystream/handlers.go new file mode 100644 index 000000000..d68f61f81 --- /dev/null +++ b/vendor/github.com/moby/spdystream/handlers.go @@ -0,0 +1,52 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects. +func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/vendor/github.com/moby/spdystream/priority.go b/vendor/github.com/moby/spdystream/priority.go new file mode 100644 index 000000000..d8eb3516c --- /dev/null +++ b/vendor/github.com/moby/spdystream/priority.go @@ -0,0 +1,114 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/moby/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q *PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/moby/spdystream/spdy/dictionary.go b/vendor/github.com/moby/spdystream/spdy/dictionary.go new file mode 100644 index 000000000..392232f17 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/dictionary.go @@ -0,0 +1,203 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. 
+var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 
0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 
0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/moby/spdystream/spdy/read.go b/vendor/github.com/moby/spdystream/spdy/read.go new file mode 100644 index 000000000..75ea045b8 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/read.go @@ -0,0 +1,364 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/vendor/github.com/moby/spdystream/spdy/types.go b/vendor/github.com/moby/spdystream/spdy/types.go new file mode 100644 index 000000000..a254a43ab --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/types.go @@ -0,0 +1,291 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. +type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply ControlFrameType = 0x0002 + TypeRstStream ControlFrameType = 0x0003 + TypeSettings ControlFrameType = 0x0004 + TypePing ControlFrameType = 0x0006 + TypeGoAway ControlFrameType = 0x0007 + TypeHeaders ControlFrameType = 0x0008 + TypeWindowUpdate ControlFrameType = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional ControlFlags = 0x02 + ControlFlagSettingsClearSettings ControlFlags = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSepator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. + version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. 
+type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted SettingsFlag = 0x2 +) + +// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. +type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. +type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders ErrorCode = "multiple headers with same name" + WrongCompressedPayloadSize ErrorCode = "compressed payload size was incorrect" + UnknownFrameType ErrorCode = "unknown frame type" + InvalidControlFrame ErrorCode = "invalid control frame" + InvalidDataFrame ErrorCode = "invalid data frame" + InvalidHeaderPresent ErrorCode = "frame contained invalid header" + ZeroStreamId ErrorCode = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. 
+type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. +func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/moby/spdystream/spdy/write.go b/vendor/github.com/moby/spdystream/spdy/write.go new file mode 100644 index 000000000..ab6d91f3b --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/write.go @@ -0,0 +1,334 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. 
+ var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go new file mode 100644 index 000000000..404e3c02d --- /dev/null +++ b/vendor/github.com/moby/spdystream/stream.go @@ -0,0 +1,343 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/moby/spdystream/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. +func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. 
+func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. +func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. 
+func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. +func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/moby/spdystream/utils.go b/vendor/github.com/moby/spdystream/utils.go new file mode 100644 index 000000000..e9f7fffd6 --- /dev/null +++ b/vendor/github.com/moby/spdystream/utils.go @@ -0,0 +1,32 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) 
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index ac1ca3cf5..cf05079fb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -69,9 +69,9 @@ type Collector interface { // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 00d70f09b..62de4dc59 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -51,7 +51,7 @@ type Counter interface { // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. +// than 128 runes in total. type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } @@ -59,6 +59,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -140,12 +152,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -173,16 +186,24 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. 
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. @@ -245,7 +266,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +278,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4bb816ab7..12331542d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,17 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -51,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels ConstrainedLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -77,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. 
Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.constrainedLabels(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -90,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -115,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels { + if !checkLabelName(label.Name) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label.Name) + labelNameSet[label.Name] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } @@ -154,7 +167,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d..962608f02 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. 
// -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. 
At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a..f1ea6c76f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. +func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. 
@@ -210,7 +230,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +242,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go new file mode 100644 index 000000000..614fd61be --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "os" + +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 000000000..eaf8059ee --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +func getPIDFn() func() (int, error) { + return func() (int, error) { + return 1, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 08195b410..ad9a71a5e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -19,6 +19,10 @@ import ( "time" ) +// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. +// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so +// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is +// populated using runtime/metrics. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { @@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, } } @@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector { "A summary of the pause duration of garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( - memstatNamespace("last_gc_time_seconds"), + "go_memstats_last_gc_time_seconds", "Number of seconds since 1970 of last garbage collection.", nil, nil), goInfoDesc: NewDesc( @@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. func (c *baseGoCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + n := getRuntimeNumThreads() + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go index 24526131e..897a6e906 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -40,13 +40,28 @@ type goCollector struct { // // Deprecated: Use collectors.NewGoCollector instead. 
func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go similarity index 52% rename from vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go rename to vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index d43bdcdda..2d8d9f64f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -23,12 +23,73 @@ import ( "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus/internal" + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" +) + +const ( + // constants for strings referenced more than once. + goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapFreesBytes = "/gc/heap/frees:bytes" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" ) +// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. 
+var rmNamesForMemStatsMetrics = []string{ + goGCHeapTinyAllocsObjects, + goGCHeapAllocsObjects, + goGCHeapFreesObjects, + goGCHeapAllocsBytes, + goGCHeapObjects, + goGCHeapGoalBytes, + goMemoryClassesTotalBytes, + goMemoryClassesHeapObjectsBytes, + goMemoryClassesHeapUnusedBytes, + goMemoryClassesHeapReleasedBytes, + goMemoryClassesHeapFreeBytes, + goMemoryClassesHeapStacksBytes, + goMemoryClassesOSStacksBytes, + goMemoryClassesMetadataMSpanInuseBytes, + goMemoryClassesMetadataMSPanFreeBytes, + goMemoryClassesMetadataMCacheInuseBytes, + goMemoryClassesMetadataMCacheFreeBytes, + goMemoryClassesProfilingBucketsBytes, + goMemoryClassesMetadataOtherBytes, + goMemoryClassesOtherBytes, +} + +func bestEffortLookupRM(lookup []string) []metrics.Description { + ret := make([]metrics.Description, 0, len(lookup)) + for _, rm := range metrics.All() { + for _, m := range lookup { + if m == rm.Name { + ret = append(ret, rm) + } + } + } + return ret +} + type goCollector struct { base baseGoCollector @@ -36,70 +97,124 @@ type goCollector struct { // snapshot is always produced by Collect. mu sync.Mutex - // rm... fields all pertain to the runtime/metrics package. - rmSampleBuf []metrics.Sample - rmSampleMap map[string]*metrics.Sample - rmMetrics []collectorMetric + // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). + sampleBuf []metrics.Sample + // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. + sampleMap map[string]*metrics.Sample + + // rmExposedMetrics represents all runtime/metrics package metrics + // that were configured to be exposed. + rmExposedMetrics []collectorMetric + rmExactSumMapForHist map[string]string // With Go 1.17, the runtime/metrics package was introduced. // From that point on, metric names produced by the runtime/metrics // package could be generated from runtime/metrics names. However, // these differ from the old names for the same values. // - // This field exist to export the same values under the old names + // This field exists to export the same values under the old names // as well. - msMetrics memStatsMetrics + msMetrics memStatsMetrics + msMetricsEnabled bool +} + +type rmMetricDesc struct { + metrics.Description +} + +func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { + var descs []rmMetricDesc + for _, d := range metrics.All() { + var ( + deny = true + desc rmMetricDesc + ) + + for _, r := range rules { + if !r.Matcher.MatchString(d.Name) { + continue + } + deny = r.Deny + } + if deny { + continue + } + + desc.Description = d + descs = append(descs, desc) + } + return descs +} + +func defaultGoCollectorOptions() internal.GoCollectorOptions { + return internal.GoCollectorOptions{ + RuntimeMetricSumForHist: map[string]string{ + "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, + "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, + }, + RuntimeMetricRules: []internal.GoCollectorRule{ + //{Matcher: regexp.MustCompile("")}, + }, + } } // NewGoCollector is the obsolete version of collectors.NewGoCollector. // See there for documentation. // // Deprecated: Use collectors.NewGoCollector instead. 
-func NewGoCollector() Collector { - descriptions := metrics.All() +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { + opt := defaultGoCollectorOptions() + for _, o := range opts { + o(&opt) + } + + exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) // Collect all histogram samples so that we can get their buckets. // The API guarantees that the buckets are always fixed for the lifetime // of the process. var histograms []metrics.Sample - for _, d := range descriptions { + for _, d := range exposedDescriptions { if d.Kind == metrics.KindFloat64Histogram { histograms = append(histograms, metrics.Sample{Name: d.Name}) } } - metrics.Read(histograms) + + if len(histograms) > 0 { + metrics.Read(histograms) + } + bucketsMap := make(map[string][]float64) for i := range histograms { bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets } - // Generate a Desc and ValueType for each runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(descriptions)) - sampleBuf := make([]metrics.Sample, 0, len(descriptions)) - sampleMap := make(map[string]*metrics.Sample, len(descriptions)) - for i := range descriptions { - d := &descriptions[i] - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d) + // Generate a collector for each exposed runtime/metrics metric. + metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) + // SampleBuf is used for reading from runtime/metrics. + // We are assuming the largest case to have stable pointers for sampleMap purposes. + sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) + sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) + for _, d := range exposedDescriptions { + namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) if !ok { // Just ignore this metric; we can't do anything with it here. // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } - // Set up sample buffer for reading, and a map - // for quick lookup of sample values. sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] var m collectorMetric if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := rmExactSumMap[d.Name] + _, hasSum := opt.RuntimeMetricSumForHist[d.Name] unit := d.Name[strings.IndexRune(d.Name, ':')+1:] m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description, + d.Description.Description, nil, nil, ), @@ -111,24 +226,61 @@ func NewGoCollector() Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, - }) + Help: d.Description.Description, + }, + ) } else { m = NewGauge(GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, + Help: d.Description.Description, }) } metricSet = append(metricSet, m) } + + // Add exact sum metrics to sampleBuf if not added before. 
+ for _, h := range histograms { + sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] + if !ok { + continue + } + + if _, ok := sampleMap[sumMetric]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) + sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] + } + + var ( + msMetrics memStatsMetrics + msDescriptions []metrics.Description + ) + + if !opt.DisableMemStatsLikeMetrics { + msMetrics = goRuntimeMemStats() + msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) + + // Check if metric was not exposed before and if not, add to sampleBuf. + for _, mdDesc := range msDescriptions { + if _, ok := sampleMap[mdDesc.Name]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) + sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] + } + } + return &goCollector{ - base: newBaseGoCollector(), - rmSampleBuf: sampleBuf, - rmSampleMap: sampleMap, - rmMetrics: metricSet, - msMetrics: goRuntimeMemStats(), + base: newBaseGoCollector(), + sampleBuf: sampleBuf, + sampleMap: sampleMap, + rmExposedMetrics: metricSet, + rmExactSumMapForHist: opt.RuntimeMetricSumForHist, + msMetrics: msMetrics, + msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, } } @@ -138,7 +290,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) { for _, i := range c.msMetrics { ch <- i.desc } - for _, m := range c.rmMetrics { + for _, m := range c.rmExposedMetrics { ch <- m.Desc() } } @@ -148,8 +300,12 @@ func (c *goCollector) Collect(ch chan<- Metric) { // Collect base non-memory metrics. c.base.Collect(ch) + if len(c.sampleBuf) == 0 { + return + } + // Collect must be thread-safe, so prevent concurrent use of - // rmSampleBuf. Just read into rmSampleBuf but write all the data + // sampleBuf elements. Just read into sampleBuf but write all the data // we get into our Metrics or MemStats. // // This lock also ensures that the Metrics we send out are all from @@ -164,14 +320,17 @@ func (c *goCollector) Collect(ch chan<- Metric) { defer c.mu.Unlock() // Populate runtime/metrics sample buffer. - metrics.Read(c.rmSampleBuf) + metrics.Read(c.sampleBuf) + + // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). + for i, metric := range c.rmExposedMetrics { + // We created samples for exposed metrics first in order, so indexes match. + sample := c.sampleBuf[i] - // Update all our metrics from rmSampleBuf. - for i, sample := range c.rmSampleBuf { // N.B. switch on concrete type because it's significantly more efficient // than checking for the Counter and Gauge interface implementations. In // this case, we control all the types here. - switch m := c.rmMetrics[i].(type) { + switch m := metric.(type) { case *counter: // Guard against decreases. This should never happen, but a failure // to do so will result in a panic, which is a harsh consequence for @@ -191,12 +350,15 @@ func (c *goCollector) Collect(ch chan<- Metric) { panic("unexpected metric type") } } - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it. - var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + + if c.msMetricsEnabled { + // ms is a dummy MemStats that we populate ourselves so that we can + // populate the old metrics from it if goMemStatsCollection is enabled. 
+ var ms runtime.MemStats + memStatsFromRM(&ms, c.sampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } } } @@ -224,11 +386,6 @@ func unwrapScalarRMValue(v metrics.Value) float64 { } } -var rmExactSumMap = map[string]string{ - "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes", - "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes", -} - // exactSumFor takes a runtime/metrics metric name (that is assumed to // be of kind KindFloat64Histogram) and returns its exact sum and whether // its exact sum exists. @@ -236,11 +393,11 @@ var rmExactSumMap = map[string]string{ // The runtime/metrics API for histograms doesn't currently expose exact // sums, but some of the other metrics are in fact exact sums of histograms. func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := rmExactSumMap[rmName] + sumName, ok := c.rmExactSumMapForHist[rmName] if !ok { return 0 } - s, ok := c.rmSampleMap[sumName] + s, ok := c.sampleMap[sumName] if !ok { return 0 } @@ -261,35 +418,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { // while having Mallocs - Frees still represent a live object count. // Unfortunately, MemStats doesn't actually export a large allocation count, // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs + tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) ms.Lookups = 0 // Already always zero. - ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. 
- // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, // and often misleading due to the fact that it's an average over the lifetime @@ -324,6 +476,11 @@ type batchHistogram struct { // buckets must always be from the runtime/metrics package, following // the same conventions. func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } h := &batchHistogram{ desc: desc, buckets: buckets, @@ -382,8 +539,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error { for i, count := range h.counts { totalCount += count if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } } // Skip the +Inf bucket, but only for the bucket list. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 893802fd6..5b69965b2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,25 +22,221 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. 
See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 
0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 
0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 
0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) +// +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +246,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +261,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. 
Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { @@ -90,11 +297,11 @@ func LinearBuckets(start, width float64, count int) []float64 { return buckets } -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal 1. @@ -180,8 +387,97 @@ type HistogramOpts struct { // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both reguler buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) 
Buckets []float64 + + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see + // SparseBucketsZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). + // + // Details about the actually used factor: The factor is calculated as + // 2^(2^n), where n is an integer number between (and including) -8 and + // 4. n is chosen so that the resulting factor is the largest that is + // still smaller or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all NativeHistogram... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. + NativeHistogramBucketFactor float64 + // All observations with an absolute value of less or equal + // NativeHistogramZeroThreshold are accumulated into a “zero” + // bucket. For best results, this should be close to a bucket + // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold is left at zero, + // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero + // constant (or any negative float value). + NativeHistogramZeroThreshold float64 + + // The remaining fields define a strategy to limit the number of + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) 
Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // NativeHistogramMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). + NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 +} + +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -204,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == bucketLabel { + if n.Name == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -218,16 +514,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. + } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. 
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -246,8 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) + h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -255,13 +568,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // observations. sumBits uint64 count uint64 + + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less or equal + // the current threshold, see next field. + nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 + // Number of (positive and negative) sparse buckets. + nativeHistogramBucketsNumber uint32 + + // Regular buckets. buckets []uint64 + + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if sparse buckets should be done, +// too. 
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + atomicAddFloat(&hc.sumBits, v) + if doSparse && !math.IsNaN(v) { + var ( + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) + bucketCreated, isInf bool + ) + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment key by one. + if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +674,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +682,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +693,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +725,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. 
- h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +739,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -363,25 +769,21 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) + + defer func() { + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) + }() + + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) } + addAndResetCounts(hotCounts, coldCounts) return nil } @@ -402,25 +804,216 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { + // Do not add to sparse buckets for NaN observations. + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) hotCounts := h.counts[n>>63] + hotCounts.observe(v, bucket, doSparse) + if doSparse { + h.limitBuckets(hotCounts, v, bucket) + } +} - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) +// limitSparsebuckets applies a strategy to limit the number of populated sparse +// buckets. 
It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { + if h.nativeHistogramMaxBuckets == 0 { + return // No limit configured. } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded after all. + } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} + +// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// has been passed. It returns true if the histogram has been reset. The caller +// must have locked h.mtx. +func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + return false + } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while ressetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} + +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold +// limits how far the zero bucket can be extended, and if that's not enough to +// include an existing bucket, the method returns false. The caller must have +// locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) + if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. 
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { + return false // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. 
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. + merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -448,15 +1041,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) }), } } @@ -516,7 +1117,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) 
if err != nil { @@ -527,7 +1129,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -581,11 +1184,11 @@ func (h *constHistogram) Desc() *Desc { func (h *constHistogram) Write(out *dto.Metric) error { his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), @@ -613,7 +1216,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -668,3 +1271,229 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return 8 + case floor >= 4: + return -4 + default: + return -int32(floor) + } +} + +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil, nil + } + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} + +// addToBucket increments the sparse bucket at key by the provided amount. 
It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). +func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +} + +func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). 
(This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := key & ((1 << schema) - 1) + frac := nativeHistogramBounds[schema][fracIdx] + exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. + } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 000000000..1ed5abe74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. +func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go new file mode 100644 index 000000000..fd0750f2c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -0,0 +1,654 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". 
The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
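To make the triple format concrete, a small sketch as it could be written from inside this internal package (the function name and inputs are illustrative):

package internal

import "fmt"

// sketchMatchingBlocks is illustrative only.
func sketchMatchingBlocks() {
    m := NewMatcher(
        []string{"q", "a", "b", "x", "c", "d"},
        []string{"a", "b", "y", "c", "d", "f"},
    )
    fmt.Println(m.GetMatchingBlocks())
    // Prints: [{1 0 2} {4 3 2} {6 6 0}]
    // i.e. a[1:3] == b[0:2] ("a", "b"), a[4:6] == b[3:5] ("c", "d"),
    // plus the terminating dummy {len(a) len(b) 0}.
}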
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
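Staying with the same toy input, a sketch of the op codes that describe turning a into b (again illustrative, inside the internal package):

package internal

import "fmt"

// sketchOpCodes is illustrative only.
func sketchOpCodes() {
    m := NewMatcher(
        []string{"q", "a", "b", "x", "c", "d"},
        []string{"a", "b", "y", "c", "d", "f"},
    )
    for _, op := range m.GetOpCodes() {
        fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
    }
    // d a[0:1] b[0:0]   delete "q"
    // e a[1:3] b[0:2]   keep "a", "b"
    // r a[3:4] b[2:3]   replace "x" with "y"
    // e a[4:6] b[3:5]   keep "c", "d"
    // i a[6:6] b[5:6]   insert "f"
}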
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
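And a quick sketch showing the upper-bound relationship between the three ratios for the same input (the printed values follow from the definitions above):

package internal

import "fmt"

// sketchRatios is illustrative only.
func sketchRatios() {
    m := NewMatcher(
        []string{"q", "a", "b", "x", "c", "d"},
        []string{"a", "b", "y", "c", "d", "f"},
    )
    fmt.Println(m.Ratio())          // 0.6666666666666666 (4 matched elements out of 12 total)
    fmt.Println(m.QuickRatio())     // 0.6666666666666666 (multiset intersection happens to agree)
    fmt.Println(m.RealQuickRatio()) // 1 (computed from the lengths alone, so only an upper bound)
}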
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
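A usage sketch of the unified-diff helpers, as they could be called from within this internal package; the file names and inputs are made up:

package internal

import "fmt"

// sketchUnifiedDiff is illustrative only.
func sketchUnifiedDiff() {
    diff := UnifiedDiff{
        A:        SplitLines("one\ntwo\nthree"),
        B:        SplitLines("one\ntwo\n3"),
        FromFile: "want",
        ToFile:   "got",
        Context:  3,
    }
    out, err := GetUnifiedDiffString(diff)
    if err != nil {
        panic(err) // illustrative; real callers should handle the error
    }
    fmt.Print(out)
    // --- want
    // +++ got
    // @@ -1,3 +1,3 @@
    //  one
    //  two
    // -three
    // +3
}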
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go new file mode 100644 index 000000000..723b45d64 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "regexp" + +type GoCollectorRule struct { + Matcher *regexp.Regexp + Deny bool +} + +// GoCollectorOptions should not be used be directly by anything, except `collectors` package. +// Use it via collectors package instead. See issue +// https://github.com/prometheus/client_golang/issues/1030. 
+// +// This is internal, so external users only can use it via `collector.WithGoCollector*` methods +type GoCollectorOptions struct { + DisableMemStatsLikeMetrics bool + RuntimeMetricSumForHist map[string]string + RuntimeMetricRules []GoCollectorRule +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index fe0a52180..97d17d6cb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) // name has - replaced with _ and is concatenated with the unit and // other data. name = strings.ReplaceAll(name, "-", "_") - name = name + "_" + unit - if d.Cumulative { - name = name + "_total" + name += "_" + unit + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { + name += "_total" } valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) @@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { switch unit { case "bytes": - // Rebucket as powers of 2. - return rebucketExp(buckets, 2) + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) case "seconds": - // Rebucket as powers of 10 and then merge all buckets greater + // Re-bucket as powers of 10 and then merge all buckets greater // than 1 second into the +Inf bucket. - b := rebucketExp(buckets, 10) + b := reBucketExp(buckets, 10) for i := range b { if b[i] <= 1 { continue @@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { return buckets } -// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and // downsamples the buckets to those a multiple of base apart. The end result // is a roughly exponential (in many cases, perfectly exponential) bucketing // scheme. -func rebucketExp(buckets []float64, base float64) []float64 { +func reBucketExp(buckets []float64, base float64) []float64 { bucket := buckets[0] var newBuckets []float64 // We may see a -Inf here, in which case, add it and skip it diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go index 351c26e1a..6515c1148 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -19,18 +19,34 @@ import ( dto "github.com/prometheus/client_model/go" ) -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type LabelPairSorter []*dto.LabelPair -func (s metricSorter) Len() int { +func (s LabelPairSorter) Len() int { return len(s) } -func (s metricSorter) Swap(i, j int) { +func (s LabelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s metricSorter) Less(i, j int) bool { +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +// MetricSorter is a sortable slice of *dto.Metric. 
+type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as @@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool { // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) + sort.Sort(MetricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2744443ac..63ff8683c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,12 +25,85 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. type Labels map[string]string +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabel struct { + Name string + Constraint func(string) string +} + +func (cl ConstrainedLabel) Constrain(v string) string { + if cl.Constraint == nil { + return v + } + return cl.Constraint(v) +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + constrainedLabels() ConstrainedLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { + return cls +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. 
+// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { + constrainedLabels := make([]ConstrainedLabel, len(uls)) + for i, l := range uls { + constrainedLabels[i] = ConstrainedLabel{Name: l} + } + return constrainedLabels +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" @@ -39,7 +112,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", + "%w: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, @@ -49,7 +122,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) @@ -67,7 +140,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index dc121910a..07bbc9d76 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -14,14 +14,15 @@ package prometheus import ( + "errors" + "math" + "sort" "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. @@ -115,22 +116,6 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. 
-type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - type invalidMetric struct { desc *Desc err error @@ -174,3 +159,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error { func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? + return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. +type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. +// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts == (time.Time{}) { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) 
+ if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 000000000..7c12b2108 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 000000000..7348df01d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 44128016f..03773b21f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -58,7 +58,7 @@ type ObserverVec interface { // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. +// invalid or if the provided labels contain more than 128 runes in total. 
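A short usage sketch of the exemplar support described above; the metric name, buckets, and label values are illustrative, and the type assertion reflects that ObserveWithExemplar is reached via the ExemplarObserver interface rather than the plain Observer:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    h := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "request_duration_seconds",
        Help:    "Request latency.",
        Buckets: prometheus.DefBuckets,
    })
    prometheus.MustRegister(h)

    // Record an observation together with an exemplar, e.g. a trace ID.
    if eo, ok := h.(prometheus.ExemplarObserver); ok {
        eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
    } else {
        h.Observe(0.42)
    }
}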
type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 5bfe0ff5b..8548dd18e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,7 +16,6 @@ package prometheus import ( "errors" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } + c.pidFn = getPIDFn() } else { c.pidFn = opts.PidFn } @@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) // It is meant to be used for the PidFn field in ProcessCollectorOpts. func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) + content, err := os.ReadFile(pidFilePath) if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) } return pid, nil diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go new file mode 100644 index 000000000..b1e363d6c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js +// +build js + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 2dc3660da..c0152cdb6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !windows -// +build !windows +//go:build !windows && !js +// +build !windows,!js package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index e7c0d0546..9819917b8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) func (d closeNotifierDelegator) CloseNotify() <-chan bool { //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } + func (d flusherDelegator) Flush() { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() { } d.ResponseWriter.(http.Flusher).Flush() } + func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } + func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } + func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } @@ -261,7 +267,7 @@ func init() { http.Flusher }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 return struct { *responseWriterDelegator http.Pusher diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index d86d0cf4b..a4cc9810b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -33,6 +33,7 @@ package promhttp import ( "compress/gzip" + "errors" "fmt" "io" "net/http" @@ -84,6 +85,13 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. 
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) +} + +// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which +// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after +// call to `done` of that `Gather`. +func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { var ( inFlightSem chan struct{} errCnt = prometheus.NewCounterVec( @@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { errCnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { return } } - mfs, err := reg.Gather() + mfs, done, err := reg.Gather() + defer done() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) @@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht cnt.WithLabelValues("500") cnt.WithLabelValues("503") if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { cnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht Help: "Current number of scrapes being served.", }) if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { gge = are.ExistingCollector.(prometheus.Gauge) } else { panic(err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 861b4d21c..d3482c40c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { gauge.Inc() defer gauge.Dec() return next.RoundTrip(r) - }) + } } // InstrumentRoundTripperCounter is a middleware that wraps the provided @@ -59,22 +59,29 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // If the wrapped RoundTripper panics or returns a non-nil error, the Counter // is not incremented. // +// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. +// // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
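A sketch of how the new transactional entry point might be wired up, assuming an application-owned registry and an illustrative counter name; a plain Gatherer is adapted with ToTransactionalGatherer, which supplies a no-op done callback, and HandlerFor itself is now a thin wrapper around this call:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_demo_total",
		Help: "Illustrative counter.",
	}))

	// Serve the registry through the transactional handler.
	h := promhttp.HandlerForTransactional(
		prometheus.ToTransactionalGatherer(reg),
		promhttp.HandlerOpts{},
	)
	http.Handle("/metrics", h)
	_ = http.ListenAndServe(":8080", nil)
}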
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) } return resp, err - }) + } } // InstrumentRoundTripperDuration is a middleware that wraps the provided @@ -94,24 +101,31 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // If the wrapped RoundTripper panics or returns a non-nil error, no values are // reported. // +// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. +// // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) } return resp, err - }) + } } // InstrumentTrace is used to offer flexibility in instrumenting the available @@ -149,7 +163,7 @@ type InstrumentTrace struct { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
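One possible way to combine the round-tripper middleware with the new exemplar option, assuming the caller stores a trace ID in the request context under a key of its own; the key type, metric names, and "trace_id" label below are invented for illustration:

package example

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey string

const traceIDKey ctxKey = "trace_id"

// newInstrumentedClient wraps the default transport with an in-flight gauge
// and a request counter; exemplars are attached when a trace ID is present.
func newInstrumentedClient(reg prometheus.Registerer) *http.Client {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight outbound requests (illustrative).",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "Outbound requests by code and method (illustrative).",
	}, []string{"code", "method"})
	reg.MustRegister(inFlight, counter)

	rt := promhttp.InstrumentRoundTripperInFlight(inFlight,
		promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport,
			promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
				if id, ok := ctx.Value(traceIDKey).(string); ok {
					return prometheus.Labels{"trace_id": id}
				}
				return nil // nil labels: no exemplar, plain Inc()
			}),
		),
	)
	return &http.Client{Transport: rt}
}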
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() trace := &httptrace.ClientTrace{ @@ -231,5 +245,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) return next.RoundTrip(r) - }) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index a23f0edc6..3793036ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,6 +28,26 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. +func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. +func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + // InstrumentHandlerInFlight is a middleware that wraps the provided // http.Handler. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.Handler. @@ -48,7 +68,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // names are "code" and "method". The function panics otherwise. For the "method" // label a predefined default label value set is used to filter given values. // Values besides predefined values will count as `unknown` method. -//`WithExtraMethods` can be used to add more methods to the set. The Observe +// `WithExtraMethods` can be used to add more methods to the set. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -62,28 +82,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. 
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) + } } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler @@ -104,25 +133,36 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() - }) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() - }) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) + } } // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided @@ -148,20 +188,25 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. 
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + l := labels(code, method, r.Method, status, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) }) next.ServeHTTP(d, r) - }) + } } // InstrumentHandlerRequestSize is a middleware that wraps the provided @@ -184,27 +229,38 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) - }) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) - }) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) + } } // InstrumentHandlerResponseSize is a middleware that wraps the provided @@ -227,17 +283,23 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) }) } @@ -246,7 +308,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler // Collector does not have a Desc or has more than one Desc or its Desc is // invalid. It also panics if the Collector has any non-const, non-curried // labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code bool, method bool) { +func checkLabels(c prometheus.Collector) (code, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. var ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index 35e41bd1e..5d4383aa1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -13,19 +13,72 @@ package promhttp -// Option are used to configure a middleware or round tripper.. -type Option func(*option) +import ( + "context" -type option struct { - extraMethods []string + "github.com/prometheus/client_golang/prometheus" +) + +// Option are used to configure both handler (middleware) or round tripper. +type Option interface { + apply(*options) +} + +// LabelValueFromCtx are used to compute the label value from request context. +// Context can be filled with values from request through middleware. +type LabelValueFromCtx func(ctx context.Context) string + +// options store options for both a handler or round tripper. +type options struct { + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraLabelsFromCtx map[string]LabelValueFromCtx +} + +func defaultOptions() *options { + return &options{ + getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, + extraLabelsFromCtx: map[string]LabelValueFromCtx{}, + } } +func (o *options) emptyDynamicLabels() prometheus.Labels { + labels := prometheus.Labels{} + + for label := range o.extraLabelsFromCtx { + labels[label] = "" + } + + return labels +} + +type optionApplyFunc func(*options) + +func (o optionApplyFunc) apply(opt *options) { o(opt) } + // WithExtraMethods adds additional HTTP methods to the list of allowed methods. // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. // // See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. func WithExtraMethods(methods ...string) Option { - return func(o *option) { + return optionApplyFunc(func(o *options) { o.extraMethods = methods - } + }) +} + +// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. +// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but +// metric will continue to observe/increment. +func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { + return optionApplyFunc(func(o *options) { + o.getExemplarFn = getExemplarFn + }) +} + +// WithLabelFromCtx registers a label for dynamic resolution with access to context. 
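A sketch of how the handler-side middleware and the context-based label option could fit together, assuming a hypothetical per-request tenant value carried in the context; the key type, metric name, and "tenant" label are invented for illustration:

package example

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKeyType struct{}

var tenantKey tenantKeyType

// instrumentedHandler observes request durations partitioned by code, method,
// and a per-request "tenant" label resolved from the context.
func instrumentedHandler(reg prometheus.Registerer, next http.Handler) http.Handler {
	dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration (illustrative).",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method", "tenant"})
	reg.MustRegister(dur)

	return promhttp.InstrumentHandlerDuration(dur, next,
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if t, ok := ctx.Value(tenantKey).(string); ok {
				return t
			}
			return "unknown"
		}),
	)
}

Because the observer is curried with the dynamic label before checkLabels runs, the extra "tenant" dimension can coexist with the usual "code" and "method" labels.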
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage +func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { + return optionApplyFunc(func(o *options) { + o.extraLabelsFromCtx[name] = valueFn + }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 383a7f594..44da9433b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,24 +15,23 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -252,9 +251,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -289,7 +291,7 @@ func (r *Registry) Register(c Collector) error { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) } // Is the descID unique? @@ -407,6 +409,14 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. + r.mtx.RUnlock() + return nil, nil + } + var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) @@ -416,7 +426,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) - r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) @@ -549,6 +558,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. 
+ for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. @@ -556,7 +590,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // This is intended for use with the textfile collector of the node exporter. // Note that the node exporter expects the filename to be suffixed with ".prom". func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } @@ -575,7 +609,7 @@ func WriteToTextfile(filename string, g Gatherer) error { return err } - if err := os.Chmod(tmp.Name(), 0644); err != nil { + if err := os.Chmod(tmp.Name(), 0o644); err != nil { return err } return os.Rename(tmp.Name(), filename) @@ -596,7 +630,7 @@ func processMetric( } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) + return fmt.Errorf("error collecting metric %v: %w", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. @@ -718,12 +752,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { for i, g := range gs { mfs, err := g.Gather() if err != nil { - if multiErr, ok := err.(MultiError); ok { + multiErr := MultiError{} + if errors.As(err, &multiErr) { for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } for _, mf := range mfs { @@ -884,11 +919,11 @@ func checkMetricConsistency( h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. 
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) + sort.Sort(internal.LabelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { @@ -897,6 +932,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -926,7 +965,7 @@ func checkDescConsistency( copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), + Name: proto.String(l.Name), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { @@ -935,7 +974,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || @@ -948,3 +987,89 @@ func checkDescConsistency( } return nil } + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. + sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. 
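For merging gathered output from more than one registry, the new MultiTRegistry can sit behind the transactional handler. A short sketch with two caller-owned registries (function and parameter names are illustrative); note the doc comment's requirement that the registries expose mutually exclusive metric families, since no deduplication is performed:

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveMerged exposes two independent registries through one endpoint.
func serveMerged(appReg, libReg *prometheus.Registry) http.Handler {
	merged := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(appReg),
		prometheus.ToTransactionalGatherer(libReg),
	)
	return promhttp.HandlerForTransactional(merged, promhttp.HandlerOpts{})
}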
Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. + Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. +func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c..dd359264e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,10 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" ) // quantileLabel is used for the label that defines the quantile in a @@ -148,6 +147,18 @@ type SummaryOpts struct { BufCap uint32 } +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging @@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == quantileLabel { + if n.Name == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -530,20 +541,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile” is an illegal // label name. 
NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. +func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) }), } } @@ -603,7 +622,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +634,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +722,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f10523..52344fef5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -23,13 +23,24 @@ type Timer struct { } // NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the +// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar +// later on will be also supported. +// Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +// +// or +// +// func TimeMeWithExemplar() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDurationWithExemplar(exemplar) +// // Do actual work. 
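The experimental V2 constructor path above accepts SummaryVecOpts instead of separate label names. A tentative sketch of how it might be called with plain (unconstrained) labels; the metric name, objectives, and "service" label are illustrative:

package example

import "github.com/prometheus/client_golang/prometheus"

// newLatencySummary builds a SummaryVec through the experimental V2 API.
func newLatencySummary() *prometheus.SummaryVec {
	return prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
		SummaryOpts: prometheus.SummaryOpts{
			Name:       "rpc_latency_seconds",
			Help:       "RPC latency (illustrative).",
			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
		},
		VariableLabels: prometheus.UnconstrainedLabels([]string{"service"}),
	})
}

A vector obtained this way is used exactly like one from NewSummaryVec, e.g. WithLabelValues("billing").Observe(0.042).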
+// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), @@ -52,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration { } return d } + +// ObserveDurationWithExemplar is like ObserveDuration, but it will also +// observe exemplar with the duration unless exemplar is nil or provided Observer can't +// be casted to ExemplarObserver. +func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { + d := time.Since(t.begin) + eo, ok := t.observer.(ExemplarObserver) + if ok && exemplar != nil { + eo.ObserveWithExemplar(d.Seconds(), exemplar) + return d + } + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index b4e0ae11c..5f6bb8001 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,11 +19,11 @@ import ( "time" "unicode/utf8" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -38,6 +38,23 @@ const ( UntypedValue ) +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. 
This is a low-level building block used by the @@ -91,11 +108,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + return nil, err + } + return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), + desc: desc, + metric: metric, }, nil } @@ -110,10 +131,8 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal } type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair + desc *Desc + metric *dto.Metric } func (m *constMetric) Desc() *Desc { @@ -121,7 +140,11 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil } func populateMetric( @@ -163,19 +186,19 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l.Name), Value: proto.String(labelValues[i]), }) } labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + sort.Sort(internal.LabelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 +const ExemplarMaxRunes = 128 // newExemplar creates a new dto.Exemplar from the provided values. An error is // returned if any of the label names or values are invalid or if the total diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 4ababe6c9..386fb2d23 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -72,6 +72,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +92,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return false @@ -99,6 +101,17 @@ func (m *MetricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. 
+// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels = constrainLabels(m.desc, labels) + return m.metricMap.deleteByLabels(labels, m.curry) +} + // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. @@ -135,10 +148,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", label.Name) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -146,7 +159,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -189,6 +202,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -214,6 +228,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -256,16 +271,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label) + return 0, fmt.Errorf("label name %q is already curried", label.Name) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) + return 0, fmt.Errorf("label name %q missing in label map", label.Name) } h = m.hashAdd(h, val) } @@ -381,6 +396,82 @@ func (m *metricMap) deleteByHashWithLabels( return true } +// deleteByLabels deletes a metric if the given labels are present in the metric. +func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { + m.mtx.Lock() + defer m.mtx.Unlock() + + var numDeleted int + + for h, metrics := range m.metrics { + i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + // Didn't find matching labels in this metric slice. + continue + } + delete(m.metrics, h) + numDeleted++ + } + + return numDeleted +} + +// findMetricWithPartialLabel returns the index of the matching metric or +// len(metrics) if not found. 
+func findMetricWithPartialLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchPartialLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +// indexOf searches the given slice of strings for the target string and returns +// the index or len(items) as well as a boolean whether the search succeeded. +func indexOf(target string, items []string) (int, bool) { + for i, l := range items { + if l == target { + return i, true + } + } + return len(items), false +} + +// valueMatchesVariableOrCurriedValue determines if a value was previously curried, +// and returns whether it matches either the "base" value or the curried value accordingly. +// It also indicates whether the match is against a curried or uncurried value. +func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. + return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. 
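The partial-match deletion shown above is available on the typed vectors (CounterVec, GaugeVec, and so on) through the embedded MetricVec. A brief sketch with an illustrative counter vector:

package example

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func deleteByMethod() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "api_requests_total",
		Help: "Requests by code and method (illustrative).",
	}, []string{"code", "method"})

	requests.WithLabelValues("200", "GET").Inc()
	requests.WithLabelValues("500", "GET").Inc()
	requests.WithLabelValues("200", "POST").Inc()

	// Remove every series whose variable labels contain method="GET",
	// regardless of the code label; label order does not matter.
	deleted := requests.DeletePartialMatch(prometheus.Labels{"method": "GET"})
	fmt.Println(deleted) // 2
}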
// @@ -485,7 +576,7 @@ func findMetricWithLabels( return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } @@ -519,7 +610,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k] { + if values[i] != labels[k.Name] { return false } } @@ -535,7 +626,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) [] iCurry++ continue } - labelValues[i] = labels[k] + labelValues[i] = labels[k.Name] } return labelValues } @@ -554,3 +645,34 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +func constrainLabels(desc *Desc, labels Labels) Labels { + constrainedValues := make(Labels, len(labels)) + for l, v := range labels { + if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { + constrainedValues[l] = desc.variableLabels[i].Constrain(v) + continue + } + constrainedValues[l] = v + } + return constrainedValues +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + constrainedValues := make([]string, len(lvs)) + var iCurry, iLVs int + for i := 0; i < len(lvs)+len(curry); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + iCurry++ + continue + } + + if i < len(desc.variableLabels) { + constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + } else { + constrainedValues[iLVs] = lvs[iLVs] + } + iLVs++ + } + return constrainedValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go new file mode 100644 index 000000000..42bc3a8f0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type v2 struct{} + +// V2 is a struct that can be referenced to access experimental API that might +// be present in v2 of client golang someday. It offers extended functionality +// of v1 with slightly changed API. It is acceptable to use some pieces from v1 +// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` +// in the same codebase. +var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 74ee93280..25da157f1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,10 +17,10 @@ import ( "fmt" "sort" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -182,7 +182,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error { Value: proto.String(lv), }) } - sort.Sort(labelPairSorter(out.Label)) + sort.Sort(internal.LabelPairSorter(out.Label)) return nil } @@ -204,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels[ln] = lv } // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9d..2b5bca4b9 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,51 +1,75 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 +// source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". 
+ MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". + MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} +// Enum value maps for MetricType. +var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -54,670 +78,1255 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. 
func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
+func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. +} + +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount + } + return 0 } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat + } + return 0 } -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) + +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum + } + return 0 } -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) + +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket + } + return nil } -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema + } + return 0 } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold + } + return 0 +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" 
json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta + } + return nil } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount + } + return nil +} + +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan + } + return nil +} + +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta + } + return nil +} + +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount + } + return nil } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. +type Bucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount + } + return 0 +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } -type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset + } + return 0 } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length + } + return 0 } -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + +type Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) + +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 
0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 
0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 
0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = 
make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go index fffda4a7e..0b91f20d5 100644 --- 
a/vendor/github.com/prometheus/common/config/config.go +++ b/vendor/github.com/prometheus/common/config/config.go @@ -18,6 +18,7 @@ package config import ( "encoding/json" + "net/http" "path/filepath" ) @@ -34,7 +35,7 @@ func (s Secret) MarshalYAML() (interface{}, error) { return nil, nil } -//UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. +// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain Secret return unmarshal((*plain)(s)) @@ -48,6 +49,29 @@ func (s Secret) MarshalJSON() ([]byte, error) { return json.Marshal(secretToken) } +type Header map[string][]Secret + +func (h *Header) HTTPHeader() http.Header { + if h == nil || *h == nil { + return nil + } + + header := make(http.Header) + + for name, values := range *h { + var s []string + if values != nil { + s = make([]string, 0, len(values)) + for _, value := range values { + s = append(s, string(value)) + } + } + header[name] = s + } + + return header +} + // DirectorySetter is a config type that contains file paths that may // be relative to the file containing the config. type DirectorySetter interface { diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 6ca9e2fce..6326afe0e 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.8 - package config import ( @@ -23,40 +21,111 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" "os" + "path/filepath" "strings" "sync" "time" "github.com/mwitkow/go-conntrack" + "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" "gopkg.in/yaml.v2" ) -// DefaultHTTPClientConfig is the default HTTP client configuration. -var DefaultHTTPClientConfig = HTTPClientConfig{ - FollowRedirects: true, -} +var ( + // DefaultHTTPClientConfig is the default HTTP client configuration. + DefaultHTTPClientConfig = HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: true, + } -// defaultHTTPClientOptions holds the default HTTP client options. -var defaultHTTPClientOptions = httpClientOptions{ - keepAlivesEnabled: true, - http2Enabled: true, - // 5 minutes is typically above the maximum sane scrape interval. So we can - // use keepalive for all configurations. - idleConnTimeout: 5 * time.Minute, -} + // defaultHTTPClientOptions holds the default HTTP client options. + defaultHTTPClientOptions = httpClientOptions{ + keepAlivesEnabled: true, + http2Enabled: true, + // 5 minutes is typically above the maximum sane scrape interval. So we can + // use keepalive for all configurations. 
+ idleConnTimeout: 5 * time.Minute, + } +) type closeIdler interface { CloseIdleConnections() } +type TLSVersion uint16 + +var TLSVersions = map[string]TLSVersion{ + "TLS13": (TLSVersion)(tls.VersionTLS13), + "TLS12": (TLSVersion)(tls.VersionTLS12), + "TLS11": (TLSVersion)(tls.VersionTLS11), + "TLS10": (TLSVersion)(tls.VersionTLS10), +} + +func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + err := unmarshal((*string)(&s)) + if err != nil { + return err + } + if v, ok := TLSVersions[s]; ok { + *tv = v + return nil + } + return fmt.Errorf("unknown TLS version: %s", s) +} + +func (tv TLSVersion) MarshalYAML() (interface{}, error) { + for s, v := range TLSVersions { + if tv == v { + return s, nil + } + } + return nil, fmt.Errorf("unknown TLS version: %d", tv) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for TLSVersion. +func (tv *TLSVersion) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if v, ok := TLSVersions[s]; ok { + *tv = v + return nil + } + return fmt.Errorf("unknown TLS version: %s", s) +} + +// MarshalJSON implements the json.Marshaler interface for TLSVersion. +func (tv TLSVersion) MarshalJSON() ([]byte, error) { + for s, v := range TLSVersions { + if tv == v { + return json.Marshal(s) + } + } + return nil, fmt.Errorf("unknown TLS version: %d", tv) +} + +// String implements the fmt.Stringer interface for TLSVersion. +func (tv *TLSVersion) String() string { + if tv == nil || *tv == 0 { + return "" + } + for s, v := range TLSVersions { + if *tv == v { + return s + } + } + return fmt.Sprintf("%d", tv) +} + // BasicAuth contains basic HTTP authentication credentials. type BasicAuth struct { Username string `yaml:"username" json:"username"` @@ -159,6 +228,26 @@ type OAuth2 struct { Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` TokenURL string `yaml:"token_url" json:"token_url"` EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + TLSConfig TLSConfig `yaml:"tls_config,omitempty"` + ProxyConfig `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +func (o *OAuth2) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain OAuth2 + if err := unmarshal((*plain)(o)); err != nil { + return err + } + return o.ProxyConfig.Validate() +} + +// UnmarshalJSON implements the json.Unmarshaler interface for OAuth2. +func (o *OAuth2) UnmarshalJSON(data []byte) error { + type plain OAuth2 + if err := json.Unmarshal(data, (*plain)(o)); err != nil { + return err + } + return o.ProxyConfig.Validate() } // SetDirectory joins any relative file paths with dir. @@ -167,6 +256,31 @@ func (a *OAuth2) SetDirectory(dir string) { return } a.ClientSecretFile = JoinDir(dir, a.ClientSecretFile) + a.TLSConfig.SetDirectory(dir) +} + +// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig. +func LoadHTTPConfig(s string) (*HTTPClientConfig, error) { + cfg := &HTTPClientConfig{} + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + return cfg, nil +} + +// LoadHTTPConfigFile parses the given YAML file into a HTTPClientConfig.
+func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + cfg, err := LoadHTTPConfig(string(content)) + if err != nil { + return nil, nil, err + } + cfg.SetDirectory(filepath.Dir(filepath.Dir(filename))) + return cfg, content, nil } // HTTPClientConfig configures an HTTP client. @@ -183,14 +297,18 @@ type HTTPClientConfig struct { // The bearer token file for the targets. Deprecated in favour of // Authorization.CredentialsFile. BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"` - // HTTP proxy server to use to connect to the targets. - ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` // TLSConfig to use to connect to the targets. TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. // The omitempty flag is not set, because it would be hidden from the // marshalled configuration when set to false. FollowRedirects bool `yaml:"follow_redirects" json:"follow_redirects"` + // EnableHTTP2 specifies whether the client should configure HTTP2. + // The omitempty flag is not set, because it would be hidden from the + // marshalled configuration when set to false. + EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"` + // Proxy configuration. + ProxyConfig `yaml:",inline"` } // SetDirectory joins any relative file paths with dir. @@ -206,7 +324,8 @@ func (c *HTTPClientConfig) SetDirectory(dir string) { } // Validate validates the HTTPClientConfig to check only one of BearerToken, -// BasicAuth and BearerTokenFile is configured. +// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL +// is set if ProxyConnectHeader is set. func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { @@ -268,6 +387,9 @@ func (c *HTTPClientConfig) Validate() error { if c.ProxyURL.URL != nil && *c.ProxyURL.URL == (url.URL{}) { c.ProxyURL.URL = nil } + if err := c.ProxyConfig.Validate(); err != nil { + return err + } return nil } @@ -306,6 +428,7 @@ type httpClientOptions struct { keepAlivesEnabled bool http2Enabled bool idleConnTimeout time.Duration + userAgent string } // HTTPClientOption defines an option that can be applied to the HTTP client. @@ -339,6 +462,13 @@ func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption { } } +// WithUserAgent allows setting the user agent. +func WithUserAgent(ua string) HTTPClientOption { + return func(opts *httpClientOptions) { + opts.userAgent = ua + } +} + // NewClient returns a http.Client using the specified http.RoundTripper. func newClient(rt http.RoundTripper) *http.Client { return &http.Client{Transport: rt} @@ -387,7 +517,8 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // The only timeout we care about is the configured scrape timeout. // It is applied on request. So we leave out any timings here. 
var rt http.RoundTripper = &http.Transport{ - Proxy: http.ProxyURL(cfg.ProxyURL.URL), + Proxy: cfg.ProxyConfig.Proxy(), + ProxyConnectHeader: cfg.ProxyConfig.GetProxyConnectHeader(), MaxIdleConns: 20000, MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 DisableKeepAlives: !opts.keepAlivesEnabled, @@ -398,19 +529,13 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT ExpectContinueTimeout: 1 * time.Second, DialContext: dialContext, } - if opts.http2Enabled && os.Getenv("PROMETHEUS_COMMON_DISABLE_HTTP2") == "" { + if opts.http2Enabled && cfg.EnableHTTP2 { // HTTP/2 support is golang had many problematic cornercases where // dead connections would be kept and used in connection pools. // https://github.com/golang/go/issues/32388 // https://github.com/golang/go/issues/39337 // https://github.com/golang/go/issues/39750 - // Do not enable HTTP2 if the environment variable - // PROMETHEUS_COMMON_DISABLE_HTTP2 is set to a non-empty value. - // This allows users to easily disable HTTP2 in case they run into - // issues again, but will be removed once we are confident that - // things work as expected. - http2t, err := http2.ConfigureTransports(rt.(*http.Transport)) if err != nil { return nil, err @@ -438,8 +563,13 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT } if cfg.OAuth2 != nil { - rt = NewOAuth2RoundTripper(cfg.OAuth2, rt) + rt = NewOAuth2RoundTripper(cfg.OAuth2, rt, &opts) + } + + if opts.userAgent != "" { + rt = NewUserAgentRoundTripper(opts.userAgent, rt) } + // Return a new configured RoundTripper. return rt, nil } @@ -453,8 +583,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // No need for a RoundTripper that reloads the CA file automatically. 
return newRT(tlsConfig) } - - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.roundTripperSettings(), newRT) } type authorizationCredentialsRoundTripper struct { @@ -498,7 +627,7 @@ func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile s func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { if len(req.Header.Get("Authorization")) == 0 { - b, err := ioutil.ReadFile(rt.authCredentialsFile) + b, err := os.ReadFile(rt.authCredentialsFile) if err != nil { return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err) } @@ -536,7 +665,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } req = cloneRequest(req) if rt.passwordFile != "" { - bs, err := ioutil.ReadFile(rt.passwordFile) + bs, err := os.ReadFile(rt.passwordFile) if err != nil { return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) } @@ -559,12 +688,15 @@ type oauth2RoundTripper struct { next http.RoundTripper secret string mtx sync.RWMutex + opts *httpClientOptions + client *http.Client } -func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper) http.RoundTripper { +func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { return &oauth2RoundTripper{ config: config, next: next, + opts: opts, } } @@ -575,7 +707,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro ) if rt.config.ClientSecretFile != "" { - data, err := ioutil.ReadFile(rt.config.ClientSecretFile) + data, err := os.ReadFile(rt.config.ClientSecretFile) if err != nil { return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err) } @@ -598,7 +730,42 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro EndpointParams: mapToValues(rt.config.EndpointParams), } - tokenSource := config.TokenSource(context.Background()) + tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig) + if err != nil { + return nil, err + } + + tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { + return &http.Transport{ + TLSClientConfig: tlsConfig, + Proxy: rt.config.ProxyConfig.Proxy(), + ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), + DisableKeepAlives: !rt.opts.keepAlivesEnabled, + MaxIdleConns: 20, + MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 + IdleConnTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, nil + } + + var t http.RoundTripper + if len(rt.config.TLSConfig.CAFile) == 0 { + t, _ = tlsTransport(tlsConfig) + } else { + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.roundTripperSettings(), tlsTransport) + if err != nil { + return nil, err + } + } + + if ua := req.UserAgent(); ua != "" { + t = NewUserAgentRoundTripper(ua, t) + } + + client := &http.Client{Transport: t} + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client) + tokenSource := config.TokenSource(ctx) rt.mtx.Lock() rt.secret = secret @@ -606,6 +773,10 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro Base: rt.next, Source: tokenSource, } + if rt.client != nil { + rt.client.CloseIdleConnections() + } + rt.client = client rt.mtx.Unlock() } @@ -616,7 +787,9 @@ func (rt *oauth2RoundTripper) 
RoundTrip(req *http.Request) (*http.Response, erro } func (rt *oauth2RoundTripper) CloseIdleConnections() { - // OAuth2 RT does not support CloseIdleConnections() but the next RT might. + if rt.client != nil { + rt.client.CloseIdleConnections() + } if ci, ok := rt.next.(closeIdler); ok { ci.CloseIdleConnections() } @@ -647,11 +820,29 @@ func cloneRequest(r *http.Request) *http.Request { // NewTLSConfig creates a new tls.Config from the given TLSConfig. func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { - tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify} + if err := cfg.Validate(); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + InsecureSkipVerify: cfg.InsecureSkipVerify, + MinVersion: uint16(cfg.MinVersion), + MaxVersion: uint16(cfg.MaxVersion), + } + + if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { + if cfg.MaxVersion < cfg.MinVersion { + return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + } + } // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. - if len(cfg.CAFile) > 0 { + if len(cfg.CA) > 0 { + if !updateRootCA(tlsConfig, []byte(cfg.CA)) { + return nil, fmt.Errorf("unable to use inline CA cert") + } + } else if len(cfg.CAFile) > 0 { b, err := readCAFile(cfg.CAFile) if err != nil { return nil, err @@ -664,12 +855,9 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { if len(cfg.ServerName) > 0 { tlsConfig.ServerName = cfg.ServerName } + // If a client cert & key is provided then configure TLS config accordingly. - if len(cfg.CertFile) > 0 && len(cfg.KeyFile) == 0 { - return nil, fmt.Errorf("client cert file %q specified without client key file", cfg.CertFile) - } else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { - return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile) - } else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 { + if cfg.usingClientCert() && cfg.usingClientKey() { // Verify that client cert and key are valid. if _, err := cfg.getClientCertificate(nil); err != nil { return nil, err @@ -682,6 +870,12 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // TLSConfig configures the options for TLS connections. type TLSConfig struct { + // Text of the CA cert to use for the targets. + CA string `yaml:"ca,omitempty" json:"ca,omitempty"` + // Text of the client cert file for the targets. + Cert string `yaml:"cert,omitempty" json:"cert,omitempty"` + // Text of the client key file for the targets. + Key Secret `yaml:"key,omitempty" json:"key,omitempty"` // The CA cert to use for the targets. CAFile string `yaml:"ca_file,omitempty" json:"ca_file,omitempty"` // The client cert file for the targets. @@ -692,6 +886,10 @@ type TLSConfig struct { ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` // Disable target certificate validation. InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` + // Minimum TLS version. + MinVersion TLSVersion `yaml:"min_version,omitempty" json:"min_version,omitempty"` + // Maximum TLS version. + MaxVersion TLSVersion `yaml:"max_version,omitempty" json:"max_version,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -707,21 +905,90 @@ func (c *TLSConfig) SetDirectory(dir string) { // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain TLSConfig - return unmarshal((*plain)(c)) + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return c.Validate() +} + +// Validate validates the TLSConfig to check that only one of the inlined or +// file-based fields for the TLS CA, client certificate, and client key are +// used. +func (c *TLSConfig) Validate() error { + if len(c.CA) > 0 && len(c.CAFile) > 0 { + return fmt.Errorf("at most one of ca and ca_file must be configured") + } + if len(c.Cert) > 0 && len(c.CertFile) > 0 { + return fmt.Errorf("at most one of cert and cert_file must be configured") + } + if len(c.Key) > 0 && len(c.KeyFile) > 0 { + return fmt.Errorf("at most one of key and key_file must be configured") + } + + if c.usingClientCert() && !c.usingClientKey() { + return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured") + } else if c.usingClientKey() && !c.usingClientCert() { + return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured") + } + + return nil +} + +func (c *TLSConfig) usingClientCert() bool { + return len(c.Cert) > 0 || len(c.CertFile) > 0 +} + +func (c *TLSConfig) usingClientKey() bool { + return len(c.Key) > 0 || len(c.KeyFile) > 0 +} + +func (c *TLSConfig) roundTripperSettings() TLSRoundTripperSettings { + return TLSRoundTripperSettings{ + CA: c.CA, + CAFile: c.CAFile, + Cert: c.Cert, + CertFile: c.CertFile, + Key: string(c.Key), + KeyFile: c.KeyFile, + } } // getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) +func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var ( + certData, keyData []byte + err error + ) + + if c.CertFile != "" { + certData, err = os.ReadFile(c.CertFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client cert (%s): %s", c.CertFile, err) + } + } else { + certData = []byte(c.Cert) + } + + if c.KeyFile != "" { + keyData, err = os.ReadFile(c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client key (%s): %s", c.KeyFile, err) + } + } else { + keyData = []byte(c.Key) + } + + cert, err := tls.X509KeyPair(certData, keyData) if err != nil { return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) } + return &cert, nil } // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } @@ -741,23 +1008,32 @@ func updateRootCA(cfg *tls.Config, b []byte) bool { // tlsRoundTripper is a RoundTripper that updates automatically its TLS // configuration whenever the content of the CA file changes. type tlsRoundTripper struct { - caFile string + settings TLSRoundTripperSettings + // newRT returns a new RoundTripper. 
newRT func(*tls.Config) (http.RoundTripper, error) - mtx sync.RWMutex - rt http.RoundTripper - hashCAFile []byte - tlsConfig *tls.Config + mtx sync.RWMutex + rt http.RoundTripper + hashCAData []byte + hashCertData []byte + hashKeyData []byte + tlsConfig *tls.Config +} + +type TLSRoundTripperSettings struct { + CA, CAFile string + Cert, CertFile string + Key, KeyFile string } func NewTLSRoundTripper( cfg *tls.Config, - caFile string, + settings TLSRoundTripperSettings, newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ - caFile: caFile, + settings: settings, newRT: newRT, tlsConfig: cfg, } @@ -767,8 +1043,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - - _, t.hashCAFile, err = t.getCAWithHash() + _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash() if err != nil { return nil, err } @@ -776,25 +1051,66 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { - b, err := readCAFile(t.caFile) - if err != nil { - return nil, nil, err +func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, error) { + var ( + caBytes, certBytes, keyBytes []byte + + err error + ) + + if t.settings.CAFile != "" { + caBytes, err = os.ReadFile(t.settings.CAFile) + if err != nil { + return nil, nil, nil, nil, err + } + } else if t.settings.CA != "" { + caBytes = []byte(t.settings.CA) + } + + if t.settings.CertFile != "" { + certBytes, err = os.ReadFile(t.settings.CertFile) + if err != nil { + return nil, nil, nil, nil, err + } + } else if t.settings.Cert != "" { + certBytes = []byte(t.settings.Cert) } - h := sha256.Sum256(b) - return b, h[:], nil + if t.settings.KeyFile != "" { + keyBytes, err = os.ReadFile(t.settings.KeyFile) + if err != nil { + return nil, nil, nil, nil, err + } + } else if t.settings.Key != "" { + keyBytes = []byte(t.settings.Key) + } + + var caHash, certHash, keyHash [32]byte + + if len(caBytes) > 0 { + caHash = sha256.Sum256(caBytes) + } + if len(certBytes) > 0 { + certHash = sha256.Sum256(certBytes) + } + if len(keyBytes) > 0 { + keyHash = sha256.Sum256(keyBytes) + } + + return caBytes, caHash[:], certHash[:], keyHash[:], nil } // RoundTrip implements the http.RoundTrip interface. func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b, h, err := t.getCAWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash() if err != nil { return nil, err } t.mtx.RLock() - equal := bytes.Equal(h[:], t.hashCAFile) + equal := bytes.Equal(caHash[:], t.hashCAData) && + bytes.Equal(certHash[:], t.hashCertData) && + bytes.Equal(keyHash[:], t.hashKeyData) rt := t.rt t.mtx.RUnlock() if equal { @@ -803,9 +1119,11 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { } // Create a new RoundTripper. + // The cert and key files are read separately by the client + // using GetClientCertificate. 
tlsConfig := t.tlsConfig.Clone() - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) + if !updateRootCA(tlsConfig, caData) { + return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CAFile) } rt, err = t.newRT(tlsConfig) if err != nil { @@ -815,7 +1133,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { t.mtx.Lock() t.rt = rt - t.hashCAFile = h[:] + t.hashCAData = caHash[:] + t.hashCertData = certHash[:] + t.hashKeyData = keyHash[:] t.mtx.Unlock() return rt.RoundTrip(req) @@ -829,6 +1149,28 @@ func (t *tlsRoundTripper) CloseIdleConnections() { } } +type userAgentRoundTripper struct { + userAgent string + rt http.RoundTripper +} + +// NewUserAgentRoundTripper adds the user agent to every request header. +func NewUserAgentRoundTripper(userAgent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{userAgent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.userAgent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CloseIdleConnections() { + if ci, ok := rt.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + func (c HTTPClientConfig) String() string { b, err := yaml.Marshal(c) if err != nil { @@ -836,3 +1178,78 @@ func (c HTTPClientConfig) String() string { } return string(b) } + +type ProxyConfig struct { + // HTTP proxy server to use to connect to the targets. + ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // NoProxy contains addresses that should not use a proxy. + NoProxy string `yaml:"no_proxy,omitempty" json:"no_proxy,omitempty"` + // ProxyFromEnvironment makes use of net/http ProxyFromEnvironment function + // to determine proxies. + ProxyFromEnvironment bool `yaml:"proxy_from_environment,omitempty" json:"proxy_from_environment,omitempty"` + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. Assume that at least _some_ of + // these headers are going to contain secrets and use Secret as the + // value type instead of string. + ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` + + proxyFunc func(*http.Request) (*url.URL, error) +} + +// Validate validates the ProxyConfig. +func (c *ProxyConfig) Validate() error { + if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) { + return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") + } + if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { + return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured") + } + if c.ProxyFromEnvironment && c.NoProxy != "" { + return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured") + } + if c.ProxyURL.URL == nil && c.NoProxy != "" { + return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured") + } + return nil +} + +// Proxy returns the Proxy URL for a request.
+func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) { + if c == nil { + return nil + } + defer func() { + fn = c.proxyFunc + }() + if c.proxyFunc != nil { + return + } + if c.ProxyFromEnvironment { + proxyFn := httpproxy.FromEnvironment().ProxyFunc() + c.proxyFunc = func(req *http.Request) (*url.URL, error) { + return proxyFn(req.URL) + } + return + } + if c.ProxyURL.URL != nil && c.ProxyURL.URL.String() != "" { + if c.NoProxy == "" { + c.proxyFunc = http.ProxyURL(c.ProxyURL.URL) + return + } + proxy := &httpproxy.Config{ + HTTPProxy: c.ProxyURL.String(), + HTTPSProxy: c.ProxyURL.String(), + NoProxy: c.NoProxy, + } + proxyFn := proxy.ProxyFunc() + c.proxyFunc = func(req *http.Request) (*url.URL, error) { + return proxyFn(req.URL) + } + } + return +} + +// GetProxyConnectHeader returns the Proxy Connect headers. +func (c *ProxyConfig) GetProxyConnectHeader() http.Header { + return c.ProxyConnectHeader.HTTPHeader() +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d..906397815 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,31 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF + } } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40..7f611ffaa 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,9 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/prototext" dto "github.com/prometheus/client_model/go" ) @@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + if ver == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0 + } + return FmtOpenMetrics_0_0_1 } } return FmtText @@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(v)) return err }, close: func() error { return nil }, @@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtOpenMetrics: + case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := MetricFamilyToOpenMetrics(w, v) diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64..c4cb20f0d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,20 +19,22 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. 
- FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dc2eedeef..dfac962a4 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -12,6 +12,7 @@ // limitations under the License. // Build only when actually fuzzing +//go:build gofuzz // +build gofuzz package expfmt @@ -20,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8a9313a3b..21cdddcf0 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" @@ -47,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. 
it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { @@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - ts, err := ptypes.Timestamp((*e).Timestamp) + err = (*e).Timestamp.CheckValid() if err != nil { return written, err } + ts := (*e).Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b06..2946b8f1a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643e..35db1cc9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,8 +24,8 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // A stateFn is a function that represents a state in a state machine. By @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c..a21b9d15d 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7f67b16e4..5727452c1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": - return 0, fmt.Errorf("empty duration string") - } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + return 0, errors.New("empty duration string") } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. 
- var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a2..9eb440413 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. 
If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. 
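The `Sample` changes above make the float and native-histogram representations mutually exclusive in JSON: `MarshalJSON` switches on `Histogram != nil`, and the matching `UnmarshalJSON` follows in the next hunk. A hedged sketch of what each case serialises to:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Classic float sample: serialised under "value".
	floatSample := model.Sample{
		Metric:    model.Metric{"__name__": "up"},
		Value:     1,
		Timestamp: 1000,
	}
	b, _ := json.Marshal(floatSample)
	fmt.Println(string(b)) // {"metric":{"__name__":"up"},"value":[1,"1"]}

	// Native-histogram sample: Histogram != nil switches to "histogram".
	histSample := model.Sample{
		Metric:    model.Metric{"__name__": "rpc_latency"},
		Timestamp: 1000,
		Histogram: &model.SampleHistogram{Count: 2, Sum: 3.5},
	}
	b, _ = json.Marshal(histSample)
	fmt.Println(string(b))
	// {"metric":{"__name__":"rpc_latency"},"histogram":[1,{"count":"2","sum":"3.5","buckets":null}]}
}
```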
func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
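`SampleStream` now carries both float `Values` and native-histogram `Histograms`, and the marshaller above only emits the slices that are populated. A round-trip sketch for the float-only case (histogram pairs would appear under a `histograms` key the same way):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ss := model.SampleStream{
		Metric: model.Metric{"__name__": "up"},
		Values: []model.SamplePair{
			{Timestamp: 1000, Value: 1},
			{Timestamp: 2000, Value: 0},
		},
		// Histograms left empty, so the "histograms" key is omitted entirely.
	}

	b, _ := json.Marshal(ss)
	fmt.Println(string(b)) // {"metric":{"__name__":"up"},"values":[[1,"1"],[2,"0"]]}

	var back model.SampleStream
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(len(back.Values), len(back.Histograms)) // 2 0
}
```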
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 000000000..0f615a705 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. 
+func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 000000000..54bb038cf --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
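The relocated `value_float.go` keeps the long-standing wire conventions: sample values travel as quoted strings (so NaN and infinities survive JSON), a `SamplePair` is a two-element array, and `Equal` treats two NaNs as equal. A short sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	// Values are quoted strings, which lets NaN and +Inf round-trip through JSON.
	nan := model.SampleValue(math.NaN())
	b, _ := json.Marshal(nan)
	fmt.Println(string(b)) // "NaN"

	// Equal treats two NaNs as equal, unlike the == operator.
	fmt.Println(nan == nan, nan.Equal(nan)) // false true

	// A SamplePair is encoded as [timestamp, "value"].
	p := model.SamplePair{Timestamp: 1500, Value: 0.5}
	b, _ = json.Marshal(p)
	fmt.Println(string(b)) // [1.5,"0.5"]
}
```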
+ +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 000000000..726c50ee6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
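The histogram types above encode a bucket as a four-element array `[boundaries, lower, upper, count]` and a `SampleHistogramPair` as `[timestamp, histogram]`; unmarshalling rejects a missing histogram. A sketch of the wire format:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	pair := model.SampleHistogramPair{
		Timestamp: 1000,
		Histogram: &model.SampleHistogram{
			Count: 3,
			Sum:   7.5,
			Buckets: model.HistogramBuckets{
				// Boundaries==0 means lower-exclusive, upper-inclusive: (1,2.5].
				{Boundaries: 0, Lower: 1, Upper: 2.5, Count: 3},
			},
		},
	}

	b, _ := json.Marshal(pair)
	fmt.Println(string(b))
	// [1,{"count":"3","sum":"7.5","buckets":[[0,"1","2.5","3"]]}]

	var back model.SampleHistogramPair
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Histogram.Buckets[0]) // (1,2.5]:3
}
```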
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 3e2a7ee50..00caa0ba4 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -31,6 +31,8 @@ var ( BuildUser string BuildDate string GoVersion = runtime.Version() + GoOS = runtime.GOOS + GoArch = runtime.GOARCH ) // NewCollector returns a collector that exports metrics about current version @@ -41,14 +43,17 @@ func NewCollector(program string) prometheus.Collector { Namespace: program, Name: "build_info", Help: fmt.Sprintf( - "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.", + "A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.", program, ), ConstLabels: prometheus.Labels{ "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "goversion": GoVersion, + "goos": GoOS, + "goarch": GoArch, + "tags": getTags(), }, }, func() float64 { return 1 }, @@ -62,6 +67,7 @@ var versionInfoTmpl = ` build date: {{.buildDate}} go version: {{.goVersion}} platform: {{.platform}} + tags: {{.tags}} ` // Print returns version information. @@ -69,12 +75,13 @@ func Print(program string) string { m := map[string]string{ "program": program, "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate, "goVersion": GoVersion, - "platform": runtime.GOOS + "/" + runtime.GOARCH, + "platform": GoOS + "/" + GoArch, + "tags": getTags(), } t := template.Must(template.New("version").Parse(versionInfoTmpl)) @@ -87,10 +94,10 @@ func Print(program string) string { // Info returns version, branch and revision information. func Info() string { - return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision) + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) } -// BuildContext returns goVersion, buildUser and buildDate information. +// BuildContext returns goVersion, platform, buildUser and buildDate information. 
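The `version` package now also reports `GoOS`, `GoArch` and build `tags`, and `NewCollector` attaches them as constant labels on the `<program>_build_info` gauge. A hedged wiring sketch; it assumes `client_golang` is on the module graph and that `Version`/`Revision` are injected via `-ldflags -X`, as is conventional for this package:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/version"
)

func main() {
	// Version, Revision, Branch etc. are normally injected at build time, e.g.
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=1.2.3"
	reg := prometheus.NewRegistry()
	reg.MustRegister(version.NewCollector("myapp"))

	// Exposes myapp_build_info{branch,goarch,goos,goversion,revision,tags,version} 1
	fmt.Println(version.Info())         // (version=..., branch=..., revision=...)
	fmt.Println(version.BuildContext()) // (go=..., platform=..., user=..., date=..., tags=...)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```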
func BuildContext() string { - return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags()) } diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go new file mode 100644 index 000000000..8eb3a0bf1 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_default.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +package version + +func getRevision() string { + return Revision +} + +func getTags() string { + return "unknown" // Not available prior to Go 1.18 +} diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go new file mode 100644 index 000000000..bfc7d4103 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_go118.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
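The pre-1.18 fallback above returns the hand-set `Revision` and `"unknown"` tags; the go1.18 variant in the next hunk derives them from the build metadata the toolchain embeds. A minimal sketch of that mechanism on its own:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// When built from a git checkout with Go 1.18+, the toolchain records
// vcs.revision, vcs.modified and -tags in the binary's build info. This is
// the same source the new info_go118.go reads.
func main() {
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded")
		return
	}
	for _, s := range bi.Settings {
		switch s.Key {
		case "vcs.revision", "vcs.modified", "-tags":
			fmt.Printf("%s=%s\n", s.Key, s.Value)
		}
	}
}
```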
+ +//go:build go1.18 +// +build go1.18 + +package version + +import "runtime/debug" + +var computedRevision string +var computedTags string + +func getRevision() string { + if Revision != "" { + return Revision + } + return computedRevision +} + +func getTags() string { + return computedTags +} + +func init() { + computedRevision, computedTags = computeRevision() +} + +func computeRevision() (string, string) { + var ( + rev = "unknown" + tags = "unknown" + modified bool + ) + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return rev, tags + } + for _, v := range buildInfo.Settings { + if v.Key == "vcs.revision" { + rev = v.Value + } + if v.Key == "vcs.modified" { + if v.Value == "true" { + modified = true + } + } + if v.Key == "-tags" { + tags = v.Value + } + } + if modified { + return rev + "-modified", tags + } + return rev, tags +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore index 25e3659ab..7cc33ae4a 100644 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -1 +1,2 @@ -/fixtures/ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edac..a197699a1 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,12 @@ --- linters: enable: - - golint + - godot + - revive + +linter-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md index 9a1aff412..d325872bd 100644 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -## Prometheus Community Code of Conduct +# Prometheus Community Code of Conduct -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 943de7615..853eb9d49 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of the file. Note that parsing the file's contents can still be performed one line at a time. This is done by first reading @@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t ``` The `/sys` filesystem contains many very small files which contain only a single numeric or text value. 
These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does not bother to check the size of the file before reading. ``` data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index fa2bd5b52..7edfe4d09 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,18 +14,18 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" + @echo ">> extracting fixtures $*" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -fixtures: fixtures/.unpacked +fixtures: testdata/fixtures/.unpacked update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index a1b1ca40f..e358db69c 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... @@ -78,17 +55,26 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.39.0 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. 
+ ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -144,32 +130,25 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ @@ -177,25 +156,21 @@ $(GOTEST_DIR): .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint @@ -212,28 +187,15 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @@ -289,12 +251,6 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - .PHONY: precheck precheck:: diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md index 67741f015..fed02d85c 100644 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -3,4 +3,4 @@ The Prometheus security policy, including how to report vulnerabilities, can be found here: -https://prometheus.io/docs/operating/security/ + diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 4e47e6172..68f36e888 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -15,11 +15,28 @@ package procfs import ( "fmt" - "io/ioutil" "net" + "os" + "strconv" "strings" ) +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + // ARPEntry contains a single row of the columnar data represented in // /proc/net/arp. type ARPEntry struct { @@ -29,12 +46,14 @@ type ARPEntry struct { HWAddr net.HardwareAddr // Name of the device Device string + // Flags + Flags byte } // GatherARPEntries retrieves all the ARP entries, parse the relevant columns, // and then return a slice of ARPEntry's. func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) } @@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err } return entry, nil } + +// IsComplete returns true if ARP entry is marked with complete flag. 
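`ARPEntry` now records the kernel flags column, and `IsComplete` (body in the next hunk) lets callers skip unresolved neighbours. A usage sketch against the bumped `procfs`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		// Only print entries whose hardware address is actually resolved.
		if e.IsComplete() {
			fmt.Printf("%s -> %s on %s (flags 0x%02x)\n", e.IPAddr, e.HWAddr, e.Device, e.Flags)
		}
	}
}
```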
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 5623b24a1..06968ca2e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs @@ -27,7 +28,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. type CPUInfo struct { Processor uint VendorID string @@ -379,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) @@ -469,7 +506,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode } // firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line +// and returns the contents of that line. func firstNonEmptyLine(scanner *bufio.Scanner) string { for scanner.Scan() { line := scanner.Text() diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 44b590ed3..64cfd534c 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go new file mode 100644 index 000000000..d88442f0e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 91e272573..c11207f3a 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index 95b5b4ec4..a6b2b3127 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 6068bd571..003bc2ad4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index e83c2e207..1c9b7313b 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (riscv || riscv64) // +build linux // +build riscv riscv64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index 26814eeba..fa3686bc0 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
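The cpuinfo hunks above add a loong64 parser and move every per-arch file to the paired `//go:build` / `// +build` constraint style; callers keep using the same exported API, since the arch-specific `parseCPUInfo` still backs `FS.CPUInfo`. A sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// CPUInfo parses /proc/cpuinfo with the parser selected by the build constraints.
	infos, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, ci := range infos {
		fmt.Printf("cpu %d: vendor=%q model=%q\n", ci.Processor, ci.VendorID, ci.ModelName)
	}
}
```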
+//go:build linux // +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index d5bedf97f..a0ef55562 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600..f9d961e44 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 5e7eeef4a..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,7673 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/environ -Lines: 1 -PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 18446744073708503040 18446744073708503040 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype 
nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 
rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB 
-Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 1000 1000 1000 0 -Gid: 1001 1001 1001 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - 
____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cmdline -Lines: 1 -BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes 
-cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 
4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 -initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx 
f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 972 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp -async : yes - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif -driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : 
ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c -driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 
-selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel 
-priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt 
: 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 
loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 
644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/stat -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/arp_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c -00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/ndisc_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb -00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 
-version 15 -timestamp 15819019232 -cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 -domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 -cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 -domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/slabinfo -Lines: 302 -slabinfo - version: 2.1 -# name : tunables : slabdata -pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 -pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 -nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 -kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 -pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 -x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 -iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 -bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 -bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 -fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 -fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 -squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 -xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ifork 0 0 376 21 2 : 
tunables 0 0 0 : slabdata 0 0 0 -xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 -nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 -reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 -ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 -ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 -ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 -ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 -jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 -jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 -jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 -jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 -mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 
0 -dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 -dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 -kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 -io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 -scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 -virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 -RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 -UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 -UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 -tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 -TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 -uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 -mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 -isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 -io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 -aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 -posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 -iommu_devinfo 24 32 512 32 4 : 
tunables 0 0 0 : slabdata 1 1 0 -iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 -iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 -UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 -ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 -ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 -PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 -UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 -tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 -request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 -TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 -hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 -dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 -eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 -inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 -scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 -request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 -blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 -bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 -biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 -biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 -ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 -uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 -audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 -sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 -skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 -skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 -configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 -file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 -file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 -fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 -net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 -task_delay_info 
784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 -taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 -proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 -pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 -proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 -seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 -sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 -kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 -kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 -mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 -inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 -dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 -names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 -hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 -lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 -key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 -uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 -mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 -fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 -files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 -signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 -sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 -task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 -cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 -anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 -anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 -pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 -Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 -Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 -Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 -Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 -trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 -ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 -pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 -radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 -task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 -dma-kmalloc-8k 0 0 24576 
1 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 -kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 -kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 -kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 -kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 -kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 -kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 -kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 -kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 -kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 -kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 -kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 -kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 -kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 -kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 -kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 -kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/swaps -Lines: 2 -Filename Type Size Used Priority -/dev/dm-2 partition 131068 176 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
-They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel/random -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/entropy_avail -Lines: 1 -3943 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/poolsize -Lines: 1 -4096 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold -Lines: 1 -3072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/vm -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/admin_reserve_kbytes -Lines: 1 -8192 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/block_dump -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/compact_unevictable_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_ratio -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_expire_centisecs -Lines: 1 -3000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_ratio -Lines: 1 -20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_writeback_centisecs -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirtytime_expire_seconds -Lines: 1 -43200 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/sys/vm/drop_caches -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/extfrag_threshold -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/hugetlb_shm_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/laptop_mode -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/legacy_va_layout -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/lowmem_reserve_ratio -Lines: 1 -256 256 32 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/max_map_count -Lines: 1 -65530 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_early_kill -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_recovery -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_free_kbytes -Lines: 1 -67584 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_slab_ratio -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_unmapped_ratio -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/mmap_min_addr -Lines: 1 -65536 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_overcommit_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_stat -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_zonelist_order -Lines: 1 -Node -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_dump_tasks -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_kill_allocating_task -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_kbytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_memory -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_ratio -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/page-cluster -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/panic_on_oom -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/percpu_pagelist_fraction -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/stat_interval -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/swappiness -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/user_reserve_kbytes -Lines: 1 -131072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/vfs_cache_pressure -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_boost_factor -Lines: 1 -15000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_scale_factor -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/zone_reclaim_mode -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/zoneinfo -Lines: 262 -Node 0, zone DMA - per-node stats - nr_inactive_anon 230981 - nr_active_anon 547580 - nr_inactive_file 316904 - nr_active_file 346282 - nr_unevictable 115467 - nr_slab_reclaimable 131220 - nr_slab_unreclaimable 47320 - nr_isolated_anon 0 - nr_isolated_file 0 - workingset_nodes 11627 - workingset_refault 466886 - workingset_activate 276925 - workingset_restore 84055 - workingset_nodereclaim 487 - nr_anon_pages 795576 - nr_mapped 215483 - nr_file_pages 761874 - nr_dirty 908 - nr_writeback 0 - nr_writeback_temp 0 - nr_shmem 224925 - nr_shmem_hugepages 0 - nr_shmem_pmdmapped 0 - nr_anon_transparent_hugepages 0 - nr_unstable 0 - nr_vmscan_write 12950 - nr_vmscan_immediate_reclaim 3033 - nr_dirtied 8007423 - nr_written 7752121 - nr_kernel_misc_reclaimable 0 - pages free 3952 - min 33 - low 41 - high 49 - spanned 4095 - present 3975 - managed 3956 - protection: (0, 2877, 7826, 7826, 7826) - nr_free_pages 3952 - nr_zone_inactive_anon 0 - nr_zone_active_anon 0 - nr_zone_inactive_file 0 - nr_zone_active_file 0 - nr_zone_unevictable 0 - nr_zone_write_pending 0 - nr_mlock 0 - nr_page_table_pages 0 - nr_kernel_stack 0 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 1 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 1 - numa_other 0 - pagesets - cpu: 0 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 1 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 2 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 3 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 4 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 5 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 6 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 7 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - node_unreclaimable: 0 - start_pfn: 1 -Node 0, zone DMA32 - pages free 204252 - min 19510 - low 21059 - high 22608 - spanned 1044480 - present 759231 - managed 742806 - protection: (0, 0, 4949, 4949, 4949) - nr_free_pages 204252 - nr_zone_inactive_anon 118558 - nr_zone_active_anon 106598 - 
nr_zone_inactive_file 75475 - nr_zone_active_file 70293 - nr_zone_unevictable 66195 - nr_zone_write_pending 64 - nr_mlock 4 - nr_page_table_pages 1756 - nr_kernel_stack 2208 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 113952967 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 113952967 - numa_other 0 - pagesets - cpu: 0 - count: 345 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 1 - count: 356 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 2 - count: 325 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 3 - count: 346 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 4 - count: 321 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 5 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 6 - count: 373 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 7 - count: 339 - high: 378 - batch: 63 - vm stats threshold: 48 - node_unreclaimable: 0 - start_pfn: 4096 -Node 0, zone Normal - pages free 18553 - min 11176 - low 13842 - high 16508 - spanned 1308160 - present 1308160 - managed 1268711 - protection: (0, 0, 0, 0, 0) - nr_free_pages 18553 - nr_zone_inactive_anon 112423 - nr_zone_active_anon 440982 - nr_zone_inactive_file 241429 - nr_zone_active_file 275989 - nr_zone_unevictable 49272 - nr_zone_write_pending 844 - nr_mlock 154 - nr_page_table_pages 9750 - nr_kernel_stack 15136 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 162718019 - numa_miss 0 - numa_foreign 0 - numa_interleave 26812 - numa_local 162718019 - numa_other 0 - pagesets - cpu: 0 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 1 - count: 366 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 2 - count: 60 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 3 - count: 256 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 4 - count: 253 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 5 - count: 159 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 6 - count: 311 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 7 - count: 264 - high: 378 - batch: 63 - vm stats threshold: 56 - node_unreclaimable: 0 - start_pfn: 1048576 -Node 0, zone Movable - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Node 0, zone Device - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/add_random -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/chunk_sectors -Lines: 1 -0 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/dax -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_granularity -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_zeroes_data -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/fua -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/hw_sector_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll_delay -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_timeout -Lines: 1 -30000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue/iosched -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_max -Lines: 1 -16384 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async -Lines: 1 -250 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/low_latency -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/max_budget -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle -Lines: 1 -8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us -Lines: 1 -8000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/timeout_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iostats -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - -Path: fixtures/sys/block/sda/queue/logical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_discard_segments -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb -Lines: 1 -32767 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_integrity_segments -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_sectors_kb -Lines: 1 -1280 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segment_size -Lines: 1 -65536 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segments -Lines: 1 -168 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/minimum_io_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nomerges -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_requests -Lines: 1 -64 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_zones -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/optimal_io_size -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/physical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/read_ahead_kb -Lines: 1 -128 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rotational -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rq_affinity -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/scheduler -Lines: 1 -mq-deadline kyber [bfq] none -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/wbt_lat_usec -Lines: 1 -75000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_cache -Lines: 1 -write back -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_same_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/zoned -Lines: 1 -none -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat -Lines: 1 -9652963 396792 759304206 
412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm/card0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm/card0/device -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_correctable -Lines: 9 -RxErr 0 -BadTLP 0 -BadDLLP 0 -Rollover 0 -Timeout 0 -NonFatalErr 0 -CorrIntErr 0 -HeaderOF 0 -TOTAL_ERR_COR 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_fatal -Lines: 19 -Undefined 0 -DLP 0 -SDES 0 -TLP 0 -FCP 0 -CmpltTO 0 -CmpltAbrt 0 -UnxCmplt 0 -RxOF 0 -MalfTLP 0 -ECRC 0 -UnsupReq 0 -ACSViol 0 -UncorrIntErr 0 -BlockedTLP 0 -AtomicOpBlocked 0 -TLPBlockedErr 0 -PoisonTLPBlocked 0 -TOTAL_ERR_FATAL 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_nonfatal -Lines: 19 -Undefined 0 -DLP 0 -SDES 0 -TLP 0 -FCP 0 -CmpltTO 0 -CmpltAbrt 0 -UnxCmplt 0 -RxOF 0 -MalfTLP 0 -ECRC 0 -UnsupReq 0 -ACSViol 0 -UncorrIntErr 0 -BlockedTLP 0 -AtomicOpBlocked 0 -TLPBlockedErr 0 -PoisonTLPBlocked 0 -TOTAL_ERR_NONFATAL 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/ari_enabled -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/boot_vga -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/broken_parity_status -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/class -Lines: 1 -0x030000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/consistent_dma_mask_bits -Lines: 1 -44 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/current_link_speed -Lines: 1 -8.0 GT/s PCIe -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/current_link_width -Lines: 1 -16 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/d3cold_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/device -Lines: 1 -0x687f -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/dma_mask_bits -Lines: 1 -44 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/driver_override -Lines: 1 -(null) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- -Path: fixtures/sys/class/drm/card0/device/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/gpu_busy_percent -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/irq -Lines: 1 -95 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/local_cpulist -Lines: 1 -0-15 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/local_cpus -Lines: 1 -0000ffff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/max_link_speed -Lines: 1 -8.0 GT/s PCIe -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/max_link_width -Lines: 1 -16 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_used -Lines: 1 -144560128 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_used -Lines: 1 -1490378752 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_used -Lines: 1 -1490378752 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_vendor -Lines: 1 -samsung -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/modalias -Lines: 1 -pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/msi_bus -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/numa_node -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pcie_bw -Lines: 1 -6641 815 256 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pcie_replay_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_dpm_force_performance_level -Lines: 1 -manual -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_dpm_state -Lines: 1 -performance -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_state -Lines: 1 -D0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_cur_state -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_dcefclk -Lines: 5 -0: 600Mhz * -1: 720Mhz -2: 800Mhz -3: 847Mhz -4: 900Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_mclk -Lines: 4 -0: 167Mhz * -1: 500Mhz -2: 800Mhz -3: 945Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_pcie -Lines: 2 -0: 8.0GT/s, x16 -1: 8.0GT/s, x16 * -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_sclk -Lines: 8 -0: 852Mhz * -1: 991Mhz -2: 1084Mhz -3: 1138Mhz -4: 1200Mhz -5: 1401Mhz -6: 1536Mhz -7: 1630Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_socclk -Lines: 8 -0: 600Mhz -1: 720Mhz * -2: 800Mhz -3: 847Mhz -4: 900Mhz -5: 960Mhz -6: 1028Mhz -7: 1107Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_features -Lines: 32 -Current ppfeatures: 0x0000000019a1ff4f -FEATURES BITMASK ENABLEMENT -DPM_PREFETCHER 0x0000000000000001 Y -GFXCLK_DPM 0x0000000000000002 Y -UCLK_DPM 0x0000000000000004 Y -SOCCLK_DPM 0x0000000000000008 Y -UVD_DPM 0x0000000000000010 N -VCE_DPM 0x0000000000000020 N -ULV 0x0000000000000040 Y -MP0CLK_DPM 0x0000000000000080 N -LINK_DPM 0x0000000000000100 Y -DCEFCLK_DPM 0x0000000000000200 Y -AVFS 0x0000000000000400 Y -GFXCLK_DS 0x0000000000000800 Y -SOCCLK_DS 0x0000000000001000 Y -LCLK_DS 0x0000000000002000 Y -PPT 0x0000000000004000 Y -TDC 0x0000000000008000 Y -THERMAL 0x0000000000010000 Y -GFX_PER_CU_CG 0x0000000000020000 N -RM 0x0000000000040000 N -DCEFCLK_DS 0x0000000000080000 N -ACDC 0x0000000000100000 N -VR0HOT 0x0000000000200000 Y -VR1HOT 0x0000000000400000 N -FW_CTF 0x0000000000800000 Y -LED_DISPLAY 0x0000000001000000 Y -FAN_CONTROL 0x0000000002000000 N -FAST_PPT 0x0000000004000000 N -DIDT 0x0000000008000000 Y -ACG 0x0000000010000000 Y -PCC_LIMIT 0x0000000020000000 N -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_force_state -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_mclk_od -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_num_states -Lines: 3 -states: 2 -0 boot -1 performance -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_od_clk_voltage -Lines: 18 -OD_SCLK: -0: 852Mhz 800mV -1: 991Mhz 900mV -2: 1084Mhz 950mV -3: 1138Mhz 1000mV -4: 1200Mhz 1050mV -5: 1401Mhz 1100mV -6: 1536Mhz 1150mV -7: 1630Mhz 1200mV -OD_MCLK: -0: 167Mhz 800mV -1: 500Mhz 800mV -2: 800Mhz 950mV -3: 945Mhz 1100mV -OD_RANGE: -SCLK: 852MHz 2400MHz -MCLK: 167MHz 1500MHz -VDDC: 800mV 1200mV -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_power_profile_mode -Lines: 8 -NUM MODE_NAME BUSY_SET_POINT FPS USE_RLC_BUSY MIN_ACTIVE_LEVEL - 0 BOOTUP_DEFAULT : 70 60 0 0 - 1 3D_FULL_SCREEN*: 70 60 1 3 - 2 POWER_SAVING : 90 60 0 0 - 3 VIDEO : 70 60 0 0 - 4 VR : 70 90 0 0 - 5 COMPUTE : 30 60 0 6 - 6 CUSTOM : 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_sclk_od -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/product_name -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/product_number -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/resource -Lines: 13 -0x0000007c00000000 0x0000007dffffffff 0x000000000014220c -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000007e00000000 0x0000007e0fffffff 0x000000000014220c -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x000000000000d000 0x000000000000d0ff 0x0000000000040101 -0x00000000fcd00000 0x00000000fcd7ffff 0x0000000000040200 -0x00000000fcd80000 0x00000000fcd9ffff 0x0000000000046200 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/revision -Lines: 1 -0xc1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/serial_number -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/subsystem_device -Lines: 1 -0x04c4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/subsystem_vendor -Lines: 1 -0x1043 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/thermal_throttling_logging -Lines: 1 -0000:09:00.0: thermal throttling logging enabled, with interval 60 seconds -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/uevent -Lines: 6 -DRIVER=amdgpu -PCI_CLASS=30000 -PCI_ID=1002:687F -PCI_SUBSYS_ID=1043:04C4 -PCI_SLOT_NAME=0000:09:00.0 -MODALIAS=pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/unique_id -Lines: 1 -0123456789abcdef -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/vbios_version -Lines: 1 -115-D050PIL-100 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/vendor -Lines: 1 -0x1002 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/class/fc_host -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo -Lines: 1 -30 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/fabric_name -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/node_name -Lines: 1 -0x2000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_id -Lines: 1 -0x000002 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_name -Lines: 1 -0x1000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_state -Lines: 1 -Online -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_type -Lines: 1 -Point-To-Point (direct nport connection) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/speed -Lines: 1 -16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames -Lines: 1 -0xffffffffffffffff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/error_frames -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts -Lines: 1 -0x13 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count -Lines: 1 -0x2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count -Lines: 1 -0x8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count -Lines: 1 -0x9 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count -Lines: 1 -0x11 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count -Lines: 1 -0x10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/nos_count -Lines: 1 -0x12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames -Lines: 1 -0x3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_words 
-Lines: 1 -0x4 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset -Lines: 1 -0x7 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames -Lines: 1 -0x5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_words -Lines: 1 -0x6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_classes -Lines: 1 -Class 3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_speeds -Lines: 1 -4 Gbit, 8 Gbit, 16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/symbolic_name -Lines: 1 -Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/board_id -Lines: 1 -HPE 100Gb 1-port OP101 QSFP28 x16 PCIe Gen3 with Intel Omni-Path Adapter -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/fw_ver -Lines: 1 -1.27.0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_data -Lines: 1 -345091702026 -Mode: 644 -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_packets -Lines: 1 -638036947 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_data -Lines: 1 -273558326543 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_packets -Lines: 1 -568318856 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_wait -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/rate -Lines: 1 -100 Gb/sec (4X EDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/board_id -Lines: 1 -SM_1141000001000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver -Lines: 1 -2.31.5050 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/hca_type -Lines: 1 -MT4099 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data -Lines: 1 -2221223609 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets -Lines: 1 -87169372 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data -Lines: 1 -26509113295 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets -Lines: 1 -85734114 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait -Lines: 1 -3599 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data -Lines: 1 -2460436784 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets -Lines: 1 -89332064 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data -Lines: 1 -26540356890 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets -Lines: 1 -88622850 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait -Lines: 1 -3846 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error 
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/device -SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme/nvme0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/firmware_rev -Lines: 1 -1B2QEXP7 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/model -Lines: 1 -Samsung SSD 970 PRO 512GB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/serial -Lines: 1 -S680HF8N190894I -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/state -Lines: 1 -live -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0 -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/class/powercap/intel-rapl:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/name -Lines: 1 -package-0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw -Lines: 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us -Lines: 1 -976 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj -Lines: 1 -118821284256 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/name -Lines: 1 -core -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/name -Lines: 1 -package-10 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0a -SymlinkTo: 
../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0a -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/cur_state -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/max_state -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/type -Lines: 1 -Processor -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/cur_state -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/max_state -Lines: 1 -27 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/type -Lines: 1 -intel_powerclamp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type 
-Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 --44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device -SymlinkTo: ../../../ACPI0003:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup -Lines: 1 -enabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms -Lines: 1 -10598 -Mode: 444 -# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm -Lines: 1 -2369000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device -SymlinkTo: ../../../PNP0C0A:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11750000 -POWER_SUPPLY_POWER_NOW=5064000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=47390000 -POWER_SUPPLY_ENERGY_NOW=40730000 -POWER_SUPPLY_CAPACITY=85 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns -Lines: 1 
-5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class -Lines: 1 -0x020000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits -Lines: 1 -64 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device -Lines: 1 -0x15d7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits -Lines: 1 -64 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override -Lines: 1 -(null) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq -Lines: 1 -140 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias -Lines: 1 -pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus -Lines: 1 -1 -Mode: 
644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource -Lines: 13 -0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision -Lines: 1 -0x21 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device -Lines: 1 -0x225a -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor -Lines: 1 -0x17aa -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent -Lines: 6 -DRIVER=e1000e -PCI_CLASS=20000 -PCI_ID=8086:15D7 -PCI_SUBSYS_ID=17AA:225A -PCI_SLOT_NAME=0000:00:1f.6 -MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor -Lines: 1 -0x8086 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/name -Lines: 1 -demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/pool -Lines: 1 -iscsi-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/name -Lines: 1 -wrong -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/pool -Lines: 1 -wrong-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource/clocksource0 -Mode: 775 -# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource -Lines: 1 -tsc hpet acpi_pm -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource -Lines: 1 -tsc -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count -Lines: 1 -10084 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings -Lines: 1 -11 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list -Lines: 1 -0,4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count -Lines: 1 -523 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings -Lines: 1 -22 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list -Lines: 1 -1,5 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node1/vmstat -Lines: 6 -nr_free_pages 1 -nr_zone_inactive_anon 2 -nr_zone_active_anon 3 -nr_zone_inactive_file 4 -nr_zone_active_file 5 -nr_zone_unevictable 6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node2/vmstat -Lines: 6 -nr_free_pages 7 -nr_zone_inactive_anon 8 -nr_zone_active_anon 9 -nr_zone_inactive_file 10 -nr_zone_active_file 11 -nr_zone_unevictable 12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug -Lines: 7 -rate: 1.1M/sec -dirty: 20.4G -target: 20.4G -proportional: 427.5k -integral: 790.0k -change: 321.5k/sec -next io: 17ms -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly -Lines: 1 -131072 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used -Lines: 1 -1867776 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used -Lines: 1 -32768 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label -Lines: 1 -fixture -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid -Lines: 1 -0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 
1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 -SymlinkTo: ../../../../devices/virtual/block/loop22 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 -SymlinkTo: ../../../../devices/virtual/block/loop23 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 -SymlinkTo: ../../../../devices/virtual/block/loop24 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 -SymlinkTo: ../../../../devices/virtual/block/loop25 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid -Lines: 1 -7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path -Lines: 1 -/home/iscsi/file_back_1G -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -1234 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -1504 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -4733 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 0040753b1..3c18c7610 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -26,7 +26,7 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" - // DefaultConfigfsMountPoint is the common mount point of the configfs + // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" ) diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 22cb07a6b..b030951fa 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,7 +14,7 @@ package util import ( - "io/ioutil" + "os" "strconv" "strings" ) @@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) { // ReadUintFromFile reads a file and attempts to parse a uint64 from it. func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) { // ReadIntFromFile reads a file and attempts to parse a int64 from it. func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 8051161b2..71b7a70eb 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -15,17 +15,16 @@ package util import ( "io" - "io/ioutil" "os" ) -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. -// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// ReadFileNoStat uses io.ReadAll to read contents of entire file. 
+// This is similar to os.ReadFile but without the call to os.Stat, because // many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner +// Reads a max file size of 1024kB. For files larger than this, a scanner // should be used. func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 + const maxBufferSize = 1024 * 1024 f, err := os.Open(filename) if err != nil { @@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) { defer f.Close() reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index c07de0b6c..1ab875cee 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,!appengine +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine package util @@ -21,7 +23,7 @@ import ( "syscall" ) -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. // https://github.com/prometheus/node_exporter/pull/728/files // // Note that this function will not read files larger than 128 bytes. @@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) { defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. + // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using syscall directly. diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index bd55b4537..1d86f5e63 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,appengine !linux +//go:build (linux && appengine) || (!linux && !darwin) +// +build linux,appengine !linux,!darwin package util diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 89e447746..391c07957 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(r) + statContent, err := io.ReadAll(r) if err != nil { return IPVSStats{}, err } diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index da3a941d6..db88566bd 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
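// Editor's illustrative aside (not part of the vendored patch): the hunks above
// replace deprecated io/ioutil helpers with their os/io equivalents and cap
// reads with io.LimitReader, since /proc and /sys files often report bogus
// sizes. A minimal sketch of that pattern; the path below is only an example.
package main

import (
	"fmt"
	"io"
	"os"
)

// readCapped reads at most max bytes from path without trusting its reported size.
func readCapped(path string, max int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return io.ReadAll(io.LimitReader(f, max))
}

func main() {
	b, err := readCapped("/proc/loadavg", 1024*1024) // same 1024kB cap as ReadFileNoStat
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("%s", b)
}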
+//go:build !windows // +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0cce190ec..0096cafbd 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// LoadAvg represents an entry in /proc/loadavg +// LoadAvg represents an entry in /proc/loadavg. type LoadAvg struct { Load1 float64 Load5 float64 diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index f0b9e5f75..a95c889cb 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -15,7 +15,7 @@ package procfs import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -64,7 +64,7 @@ type MDStat struct { // structs containing the relevant info. More information available here: // https://raid.wiki.kernel.org/index.php/Mdstat func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + data, err := os.ReadFile(fs.proc.Path("mdstat")) if err != nil { return nil, err } @@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + statusFields := strings.Fields(statusLine) + if len(statusFields) < 1 { + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + } - sizeStr := strings.Fields(statusLine)[0] + sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1..0c482c18c 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 9964a3600..8300daca0 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -25,7 +25,7 @@ import ( ) // A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core +// and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 Found uint64 @@ -38,12 +38,12 @@ type ConntrackStatEntry struct { SearchRestart uint64 } -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) } -// Parses a slice of ConntrackStatEntries from the given filepath +// Parses a slice of ConntrackStatEntries from the given filepath. 
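// Editor's illustrative aside (not part of the vendored patch): the mdstat.go
// hunk above guards evalStatusLine against an empty status line before indexing
// its first field. A minimal sketch of that defensive-parsing pattern, using a
// made-up md status line:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// firstFieldInt64 returns the first whitespace-separated field of line as an
// int64, returning an error instead of panicking when the line has no fields.
func firstFieldInt64(line string) (int64, error) {
	fields := strings.Fields(line)
	if len(fields) < 1 {
		return 0, fmt.Errorf("unexpected status line %q", line)
	}
	return strconv.ParseInt(fields[0], 10, 64)
}

func main() {
	size, err := firstFieldInt64("523968 blocks super 1.2 [2/2] [UU]")
	fmt.Println(size, err) // 523968 <nil>

	_, err = firstFieldInt64("   ")
	fmt.Println(err) // unexpected status line "   "
}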
func readConntrackStat(path string) ([]ConntrackStatEntry, error) { // This file is small and can be read with one syscall. b, err := util.ReadFileNoStat(path) @@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { return stat, nil } -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { var entries []ConntrackStatEntry @@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { return entries, nil } -// Parses a ConntrackStatEntry from given array of fields +// Parses a ConntrackStatEntry from given array of fields. func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { if len(fields) != 17 { return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") @@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { return entry, nil } -// Parses a uint64 from given hex in string +// Parses a uint64 from given hex in string. func parseConntrackStatField(field string) (uint64, error) { val, err := strconv.ParseUint(field, 16, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 47a710bef..e66208aa0 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) { // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { return nil, errors.New("invalid net/dev line, missing colon") } - fields := strings.Fields(strings.TrimSpace(parts[1])) + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) var err error line := &NetDevLine{} // Interface Name - line.Name = strings.TrimSpace(parts[0]) + line.Name = strings.TrimSpace(rawLine[:idx]) if line.Name == "" { return nil, errors.New("invalid net/dev line, empty interface name") } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 8c9ee3de8..7fd57d7f4 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -34,7 +34,7 @@ const ( readLimit = 4294967296 // Byte -> 4 GiB ) -// this contains generic data structures for both udp and tcp sockets +// This contains generic data structures for both udp and tcp sockets. type ( // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. NetIPSocket []*netIPSocketLine diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8c6de3791..374b6f73f 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// NetProtocolStats stores the contents from /proc/net/protocols +// NetProtocolStats stores the contents from /proc/net/protocols. 
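// Editor's illustrative aside (not part of the vendored patch): the net_dev.go
// hunk above switches from splitting on the first ':' to strings.LastIndex, so
// an interface name that itself contains a colon (the alias "eth0:1" below is a
// made-up example) still separates cleanly from the counter fields, which never
// contain colons. A minimal sketch:
package main

import (
	"fmt"
	"strings"
)

// splitNetDevLine splits a /proc/net/dev data line at its last colon.
func splitNetDevLine(rawLine string) (name string, fields []string, ok bool) {
	idx := strings.LastIndex(rawLine, ":")
	if idx == -1 {
		return "", nil, false
	}
	name = strings.TrimSpace(rawLine[:idx])
	fields = strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
	return name, fields, true
}

func main() {
	name, fields, ok := splitNetDevLine("  eth0:1: 4826843 21459 0 0 0 0 0 0")
	fmt.Println(ok, name, len(fields)) // true eth0:1 8
}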
type NetProtocolStats map[string]NetProtocolStatLine // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We @@ -41,7 +41,7 @@ type NetProtocolStatLine struct { Capabilities NetProtocolCapabilities } -// NetProtocolCapabilities contains a list of capabilities for each protocol +// NetProtocolCapabilities contains a list of capabilities for each protocol. type NetProtocolCapabilities struct { Close bool // 8 Connect bool // 9 diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 46f12c61d..06b7b8f21 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,17 +27,30 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 -// SoftnetStat contains a single row of data from /proc/net/softnet_stat +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { - // Number of processed packets + // Number of processed packets. Processed uint32 - // Number of dropped packets + // Number of dropped packets. Dropped uint32 - // Number of times processing packets ran out of quota + // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. 
- us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go similarity index 96% rename from vendor/github.com/prometheus/procfs/xfrm.go rename to vendor/github.com/prometheus/procfs/net_xfrm.go index eed07c7d7..f9d9d243d 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -79,10 +79,13 @@ type XfrmStat struct { // Policy is dead XfrmOutPolDead int // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired XfrmOutStateInvalid int - XfrmAcquireError int + // State hasn’t been fully acquired before use + XfrmAcquireError int } // NewXfrmStat reads the xfrm_stat statistics. diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 94d892f11..5cc40aef5 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,19 +15,20 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" "strings" ) -// NetStat contains statistics for all the counters from one file +// NetStat contains statistics for all the counters from one file. type NetStat struct { - Filename string Stats map[string][]uint64 + Filename string } -// NetStat retrieves stats from /proc/net/stat/ +// NetStat retrieves stats from `/proc/net/stat/`. func (fs FS) NetStat() ([]NetStat, error) { statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) if err != nil { @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. 
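// Editor's illustrative aside (not part of the vendored patch): both the
// softnet_stat and net/stat hunks above parse whitespace-separated hexadecimal
// counters, and netstat.go widens the parse from 32 to 64 bits. A minimal
// sketch of that parsing step on a made-up row of columns:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHexUint64s converts each hexadecimal column into a uint64.
func parseHexUint64s(columns []string) ([]uint64, error) {
	out := make([]uint64, 0, len(columns))
	for _, c := range columns {
		v, err := strconv.ParseUint(c, 16, 64)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}

func main() {
	vals, err := parseHexUint64s(strings.Fields("00015c73 00020e76 F0000769 00000000"))
	fmt.Println(vals, err) // [89203 134774 4026533737 0] <nil>
}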
+func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) + ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 32) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 28f696803..c30223af7 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -16,7 +16,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strconv" "strings" @@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) { // NewProc returns a process for the given pid. // -// Deprecated: use fs.Proc() instead +// Deprecated: Use fs.Proc() instead. func (fs FS) NewProc(pid int) (Proc, error) { return fs.Proc(pid) } @@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) { } defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return "", err } @@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) { return wd, err } -// RootDir returns the absolute path to the process's root directory (as set by chroot) +// RootDir returns the absolute path to the process's root directory (as set by chroot). func (p Proc) RootDir() (string, error) { rdir, err := os.Readlink(p.path("root")) if os.IsNotExist(err) { @@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { // Schedstat returns task scheduling information for the process. func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) + contents, err := os.ReadFile(p.path("schedstat")) if err != nil { return ProcSchedstat{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index be45b7987..ea83a75ff 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. 
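With parsing factored out into parseNetstat, FS.NetStat still returns one NetStat per file under /proc/net/stat/, but per-CPU counters are now parsed as 64-bit values. A short usage sketch; procfs.NewDefaultFS is assumed, as it is not part of this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetStat() // one entry per file under /proc/net/stat/
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		// Stats maps a header name from the file's first line to its per-CPU values.
		for name, perCPU := range s.Stats {
			fmt.Printf("%s %s=%v\n", s.Filename, name, perCPU)
		}
	}
}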
Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in @@ -45,7 +45,7 @@ type Cgroup struct { } // parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path +// Line format is hierarchyID:[controller1,controller2]:path. func parseCgroupString(cgroupStr string) (*Cgroup, error) { var err error @@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { return cgroup, nil } -// parseCgroups reads each line of the /proc/[pid]/cgroup file +// parseCgroups reads each line of the /proc/[pid]/cgroup file. func parseCgroups(data []byte) ([]Cgroup, error) { var cgroups []Cgroup scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) { // Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system +// so the len of the returned struct is equal to the number of active hierarchies on this system. func (p Proc) Cgroups() ([]Cgroup, error) { data, err := util.ReadFileNoStat(p.path("cgroup")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 000000000..24d4dce9c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. +// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. 
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("failed to parse Cgroup Num") + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse Enabled") + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. +func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 6134b3580..57a89895d 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Environ reads process environments from /proc//environ +// Environ reads process environments from `/proc//environ`. func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index cf63227f0..1bbdd4a8e 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Regexp variables var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) @@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) } func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } -// InotifyWatchLen returns the total number of inotify watches +// InotifyWatchLen returns the total number of inotify watches. 
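The new proc_cgroups.go exposes the kernel-wide controller table from /proc/cgroups through FS.CgroupSummarys. A minimal sketch; procfs.NewDefaultFS is assumed from the package's existing API:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		// Enabled is 1 when the controller is active, 0 when it has been disabled.
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}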
func (p ProcFDInfos) InotifyWatchLen() (int, error) { length := 0 for _, f := range p { diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 000000000..9df79c237 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. +func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index dd20f198a..7a1388185 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -79,7 +79,7 @@ var ( // NewLimits returns the current soft limits of the process. // -// Deprecated: use p.Limits() instead +// Deprecated: Use p.Limits() instead. 
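proc_interrupts.go adds a view of /proc/interrupts keyed by IRQ name, with one counter per CPU plus the interrupt type and device list where the kernel provides them. A usage sketch; procfs.NewDefaultFS is assumed, while fs.Self appears earlier in this package:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self() // any Proc works; /proc/interrupts is system-wide
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, line := range interrupts {
		// Values holds one counter per CPU; Info and Devices stay empty for
		// the short two-column lines such as ERR or MIS.
		fmt.Printf("%s info=%q devices=%q values=%v\n", irq, line.Info, line.Devices, line.Values)
	}
}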
func (p Proc) NewLimits() (ProcLimits, error) { return p.Limits() } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 1d7772d51..f1bcbf32b 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js package procfs @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. type ProcMapPermissions struct { // mapping has the [R]ead flag set Read bool @@ -39,8 +41,8 @@ type ProcMapPermissions struct { Private bool } -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. type ProcMap struct { // The start address of current mapping. StartAddr uintptr @@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) { return unix.Mkdev(uint32(major), uint32(minor)), nil } -// parseAddress just converts a hex-string to a uintptr +// parseAddress converts a hex-string to a uintptr. func parseAddress(s string) (uintptr, error) { a, err := strconv.ParseUint(s, 16, 0) if err != nil { @@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) { return uintptr(a), nil } -// parseAddresses parses the start-end address +// parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 000000000..6a43bb245 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,443 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. 
+ PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts 
*float64 + InCEPkts *float64 + ReasmOverlaps *float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseProcNetstat parses the metrics from proc//net/netstat file +// and returns a ProcNetstat structure. +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = &value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = &value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = &value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = &value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = &value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = &value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = &value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = &value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = &value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = &value + case "TW": + procNetstat.TcpExt.TW = &value + case "TWRecycled": + procNetstat.TcpExt.TWRecycled = &value + case "TWKilled": + procNetstat.TcpExt.TWKilled = &value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = &value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = &value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = &value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = &value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = &value + case "ListenOverflows": + procNetstat.TcpExt.ListenOverflows = &value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = &value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = &value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = &value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = &value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = &value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = &value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = &value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = &value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = &value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = &value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = &value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = &value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = &value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = &value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = &value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = &value + case 
"TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = &value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = &value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = &value + case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = &value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = &value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = &value + case "TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = &value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = &value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = &value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = &value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = &value + case "TCPDSACKOfoSent": + procNetstat.TcpExt.TCPDSACKOfoSent = &value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = &value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = &value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = &value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = &value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = &value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = &value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = &value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = &value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = &value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = &value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = &value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = &value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = &value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = &value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = &value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = &value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = &value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = &value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = &value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = &value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = &value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = &value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = &value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = &value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = &value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = &value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = &value + case 
"TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = &value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = &value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = &value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = &value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = &value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = &value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = &value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = &value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = &value + } + case "IpExt": + switch key { + case "InNoRoutes": + procNetstat.IpExt.InNoRoutes = &value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = &value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = &value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = &value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = &value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = &value + case "InOctets": + procNetstat.IpExt.InOctets = &value + case "OutOctets": + procNetstat.IpExt.OutOctets = &value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = &value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = &value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = &value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = &value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = &value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = &value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = &value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = &value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = &value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = &value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index dc6c14f0a..a68fe1529 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -35,9 +35,10 @@ import ( const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. type PSILine struct { Avg10 float64 Avg60 float64 @@ -46,8 +47,9 @@ type PSILine struct { } // PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. 
type PSIStats struct { Some *PSILine Full *PSILine @@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { return parsePSIStats(resource, bytes.NewReader(data)) } -// parsePSIStats parses the specified file for pressure stall information +// parsePSIStats parses the specified file for pressure stall information. func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { psiStats := PSIStats{} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index a576a720a..0e97d9957 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -28,30 +29,30 @@ import ( ) var ( - // match the header line before each mapped zone in /proc/pid/smaps + // match the header line before each mapped zone in `/proc/pid/smaps`. procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM + // Amount of the mapping that is currently resident in RAM. Rss uint64 - // Process's proportional share of this mapping + // Process's proportional share of this mapping. Pss uint64 - // Size in bytes of clean shared pages + // Size in bytes of clean shared pages. SharedClean uint64 - // Size in bytes of dirty shared pages + // Size in bytes of dirty shared pages. SharedDirty uint64 - // Size in bytes of clean private pages + // Size in bytes of clean private pages. PrivateClean uint64 - // Size in bytes of dirty private pages + // Size in bytes of dirty private pages. PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed + // Amount of memory currently marked as referenced or accessed. Referenced uint64 - // Amount of memory that does not belong to any file + // Amount of memory that does not belong to any file. Anonymous uint64 - // Amount would-be-anonymous memory currently on swap + // Amount would-be-anonymous memory currently on swap. Swap uint64 - // Process's proportional memory on swap + // Process's proportional memory on swap. SwapPss uint64 } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 000000000..6c46b7188 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 +} + +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 +} + +type IcmpMsg struct { + InType3 *float64 + OutType3 *float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 +} + +type Udp struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = &value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = &value + case "InReceives": + procSnmp.Ip.InReceives = &value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = &value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = &value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = &value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = &value + case "InDiscards": + procSnmp.Ip.InDiscards = &value + case "InDelivers": + procSnmp.Ip.InDelivers = &value + case "OutRequests": + procSnmp.Ip.OutRequests = &value + case "OutDiscards": + procSnmp.Ip.OutDiscards = &value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = &value + case "ReasmReqds": + procSnmp.Ip.ReasmReqds = &value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = &value + case "ReasmFails": + procSnmp.Ip.ReasmFails = &value + case "FragOKs": + procSnmp.Ip.FragOKs = &value + case "FragFails": + procSnmp.Ip.FragFails = &value + case "FragCreates": + procSnmp.Ip.FragCreates = &value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = &value + case "InErrors": + procSnmp.Icmp.InErrors = &value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = &value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = &value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = &value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = &value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = &value + case "InRedirects": + procSnmp.Icmp.InRedirects = &value + case "InEchos": + procSnmp.Icmp.InEchos = &value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = &value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = &value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = &value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = &value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = &value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = &value + case "OutErrors": + procSnmp.Icmp.OutErrors = &value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = &value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = &value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = &value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = &value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = &value + case "OutEchos": + procSnmp.Icmp.OutEchos = &value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = &value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = &value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = &value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = &value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = &value + } + case "IcmpMsg": + switch key { + case "InType3": + procSnmp.IcmpMsg.InType3 = &value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = &value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + procSnmp.Tcp.RtoAlgorithm = &value + case "RtoMin": + procSnmp.Tcp.RtoMin = &value + case "RtoMax": + procSnmp.Tcp.RtoMax = &value + case "MaxConn": + procSnmp.Tcp.MaxConn = &value + case "ActiveOpens": + 
procSnmp.Tcp.ActiveOpens = &value + case "PassiveOpens": + procSnmp.Tcp.PassiveOpens = &value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = &value + case "EstabResets": + procSnmp.Tcp.EstabResets = &value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = &value + case "InSegs": + procSnmp.Tcp.InSegs = &value + case "OutSegs": + procSnmp.Tcp.OutSegs = &value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = &value + case "InErrs": + procSnmp.Tcp.InErrs = &value + case "OutRsts": + procSnmp.Tcp.OutRsts = &value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = &value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = &value + case "NoPorts": + procSnmp.Udp.NoPorts = &value + case "InErrors": + procSnmp.Udp.InErrors = &value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = &value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = &value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = &value + case "NoPorts": + procSnmp.UdpLite.NoPorts = &value + case "InErrors": + procSnmp.UdpLite.InErrors = &value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = &value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = &value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 000000000..3059cc6a1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. 
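Proc.Snmp gives /proc/<pid>/net/snmp the same pointer-per-counter treatment, grouped by protocol (Ip, Icmp, Tcp, Udp, UdpLite). A sketch; procfs.NewDefaultFS is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}
	// Nil checks matter here too: counters missing from this kernel stay nil.
	if snmp.Tcp.CurrEstab != nil && snmp.Ip.InReceives != nil {
		fmt.Printf("pid=%d CurrEstab=%.0f InReceives=%.0f\n",
			snmp.PID, *snmp.Tcp.CurrEstab, *snmp.Ip.InReceives)
	}
}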
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 +} + +type Icmp6 struct { + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. 
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = &value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = &value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = &value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = &value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = &value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = &value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = &value + case "InDiscards": + procSnmp6.Ip6.InDiscards = &value + case "InDelivers": + procSnmp6.Ip6.InDelivers = &value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = &value + case "OutRequests": + procSnmp6.Ip6.OutRequests = &value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = &value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = &value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = &value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = &value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = &value + case "FragOKs": + procSnmp6.Ip6.FragOKs = &value + case "FragFails": + procSnmp6.Ip6.FragFails = &value + case "FragCreates": + procSnmp6.Ip6.FragCreates = &value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = &value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = &value + case "InOctets": + procSnmp6.Ip6.InOctets = &value + case "OutOctets": + procSnmp6.Ip6.OutOctets = &value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = &value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = &value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = &value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = &value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = &value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = &value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = &value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = &value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = &value + case "InErrors": + procSnmp6.Icmp6.InErrors = &value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = &value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = &value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = &value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = &value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = &value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = &value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = &value + case "InEchos": + procSnmp6.Icmp6.InEchos = &value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = &value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = &value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = &value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = &value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = &value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = &value + case 
"InNeighborSolicits": + procSnmp6.Icmp6.InNeighborSolicits = &value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = &value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = &value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = &value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = &value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = &value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = &value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = &value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = &value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = &value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = &value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = &value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = &value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = &value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = &value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = &value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = &value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = &value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = &value + case "InType1": + procSnmp6.Icmp6.InType1 = &value + case "InType134": + procSnmp6.Icmp6.InType134 = &value + case "InType135": + procSnmp6.Icmp6.InType135 = &value + case "InType136": + procSnmp6.Icmp6.InType136 = &value + case "InType143": + procSnmp6.Icmp6.InType143 = &value + case "OutType133": + procSnmp6.Icmp6.OutType133 = &value + case "OutType135": + procSnmp6.Icmp6.OutType135 = &value + case "OutType136": + procSnmp6.Icmp6.OutType136 = &value + case "OutType143": + procSnmp6.Icmp6.OutType143 = &value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = &value + case "NoPorts": + procSnmp6.Udp6.NoPorts = &value + case "InErrors": + procSnmp6.Udp6.InErrors = &value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = &value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = &value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = &value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = &value + case "InErrors": + procSnmp6.UdpLite6.InErrors = &value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = &value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 8c7b6e80a..b278eb2c2 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -81,10 +81,10 @@ type ProcStat struct { STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. - CUTime uint + CUTime int // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. 
- CSTime uint + CSTime int // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -115,7 +117,7 @@ type ProcStat struct { // NewStat returns the current status information of the process. // -// Deprecated: use p.Stat() instead +// Deprecated: Use p.Stat() instead. func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } @@ -141,6 +143,11 @@ func (p Proc) Stat() (ProcStat, error) { } s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, @@ -179,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 6edd8333b..3d8c06439 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // nolint:revive // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // nolint:revive // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // nolint:revive // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // nolint:revive // Peak resident set size. - VmHWM uint64 // nolint:golint + VmHWM uint64 // nolint:revive // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // nolint:revive // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // nolint:revive // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // nolint:revive // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // nolint:revive // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // nolint:revive // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // nolint:revive // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // nolint:revive // Shared library code size. - VmLib uint64 // nolint:golint + VmLib uint64 // nolint:revive // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // nolint:revive // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // nolint:revive // Swapped-out virtual memory size by anonymous private. 
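With the ProcStat changes above, CUTime and CSTime become signed, and the previously ignored stat field is surfaced as Processor, the CPU the task last ran on. A sketch; procfs.NewDefaultFS is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Processor is the CPU last executed on; CUTime/CSTime are now int.
	fmt.Printf("comm=%s processor=%d cutime=%d cstime=%d\n",
		stat.Comm, stat.Processor, stat.CUTime, stat.CSTime)
}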
- VmSwap uint64 // nolint:golint + VmSwap uint64 // nolint:revive // Size of hugetlb memory portions HugetlbPages uint64 @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 000000000..d46533ebf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 28228164e..5f7f32dc8 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -40,7 +40,7 @@ type Schedstat struct { CPUs []*SchedstatCPU } -// SchedstatCPU contains the values from one "cpu" line +// SchedstatCPU contains the values from one "cpu" line. type SchedstatCPU struct { CPUNum string @@ -49,14 +49,14 @@ type SchedstatCPU struct { RunTimeslices uint64 } -// ProcSchedstat contains the values from /proc//schedstat +// ProcSchedstat contains the values from `/proc//schedstat`. type ProcSchedstat struct { RunningNanoseconds uint64 WaitingNanoseconds uint64 RunTimeslices uint64 } -// Schedstat reads data from /proc/schedstat +// Schedstat reads data from `/proc/schedstat`. 
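proc_sys.go maps dotted sysctl names onto /proc/sys paths and parses the result either as whitespace-separated strings or as ints. A sketch; procfs.NewDefaultFS is assumed, and the sysctl names used here are just common examples:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// "vm.swappiness" is read from /proc/sys/vm/swappiness.
	vals, err := fs.SysctlInts("vm.swappiness")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("vm.swappiness =", vals[0])

	// Multi-field entries come back as one element per field.
	ports, err := fs.SysctlStrings("net.ipv4.ip_local_port_range")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ip_local_port_range =", ports)
}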
func (fs FS) Schedstat() (*Schedstat, error) { file, err := os.Open(fs.proc.Path("schedstat")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 7896fd724..bc9aaf5c2 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { return s, nil } -// SlabInfo reads data from /proc/slabinfo +// SlabInfo reads data from `/proc/slabinfo`. func (fs FS) SlabInfo() (SlabInfo, error) { // TODO: Consider passing options to allow for parsing different // slabinfo versions. However, slabinfo 2.1 has been stable since diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 000000000..559129cbc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. +type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("softirqs empty") + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, 
count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + } + + return softirqs, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6d8727541..586af48af 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -41,7 +41,7 @@ type CPUStat struct { // SoftIRQStat represent the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs +// It is possible to get per-cpu stats by reading `/proc/softirqs`. type SoftIRQStat struct { Hi uint64 Timer uint64 @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { // NewStat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. 
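The change from `CPU []CPUStat` to `CPU map[int64]CPUStat` means callers now index per-CPU statistics by CPU ID rather than by slice position. A short sketch of how a consumer might read the new map, together with the new `Softirqs` accessor, assuming a standard `/proc` mount:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	stat, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Stat.CPU is now a map keyed by CPU ID, so offline or sparse CPUs
	// no longer need placeholder slice entries.
	for id, cpu := range stat.CPU {
		fmt.Printf("cpu%d: user=%.2f system=%.2f idle=%.2f\n", id, cpu.User, cpu.System, cpu.Idle)
	}

	// The new Softirqs accessor exposes per-CPU softirq counters.
	soft, err := fs.Softirqs()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("TIMER softirqs observed on", len(soft.Timer), "CPUs")
}
```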
func NewStat() (Stat, error) { fs, err := NewFS(fs.DefaultProcMountPoint) if err != nil { @@ -155,25 +155,38 @@ func NewStat() (Stat, error) { } // NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func (fs FS) NewStat() (Stat, error) { return fs.Stat() } // Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { fileName := fs.proc.Path("stat") data, err := util.ReadFileNoStat(fileName) if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 000000000..f08bfc769 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. 
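`AllThreads` reuses the existing `Proc` machinery for per-thread entries under `/proc/<pid>/task/<tid>`. A small sketch that lists the threads of the current process, assuming the default mount point:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// Enumerate the threads of the current process from /proc/<pid>/task.
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		comm, err := t.Comm()
		if err != nil {
			continue // the thread may have exited between listing and reading
		}
		fmt.Printf("tid=%d comm=%s\n", t.PID, comm)
	}
}
```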
+func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cb1389141..cdedcae99 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -26,10 +26,12 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string +// and numa_zonelist_order (deprecated) which is a string. type VM struct { AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes BlockDump *int64 // /proc/sys/vm/block_dump @@ -87,7 +89,7 @@ func (fs FS) VM() (*VM, error) { return nil, fmt.Errorf("%s is not a directory", path) } - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 209e2ac98..c745a4c04 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) // structs containing the relevant info. 
More information available here: // https://www.kernel.org/doc/Documentation/sysctl/vm.txt func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b18..2924cf3a1 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) 
} } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) } } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f0af8246c..e6ff8dfeb 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -218,16 +218,22 @@ func (c *Call) Unset() *Call { foundMatchingCall := false - for i, call := range c.Parent.ExpectedCalls { + // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones + var index int // write index + for _, call := range c.Parent.ExpectedCalls { if call.Method == c.Method { _, diffCount := call.Arguments.Diff(c.Arguments) if diffCount == 0 { foundMatchingCall = true - // Remove from ExpectedCalls - c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) 
+ // Remove from ExpectedCalls - just skip it + continue } } + c.Parent.ExpectedCalls[index] = call + index++ } + // trim slice up to last copied index + c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] if !foundMatchingCall { unlockOnce.Do(c.unlock) diff --git a/vendor/github.com/vladimirvivien/gexe/README.md b/vendor/github.com/vladimirvivien/gexe/README.md index 2b8dd0050..0a956dd6b 100644 --- a/vendor/github.com/vladimirvivien/gexe/README.md +++ b/vendor/github.com/vladimirvivien/gexe/README.md @@ -2,38 +2,38 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/vladimirvivien/echo)](https://goreportcard.com/report/github.com/vladimirvivien/echo) ![Build](https://github.com/vladimirvivien/gexe/actions/workflows/build.yml/badge.svg) # Project `gexe` -Script-like OS interaction wrapped in the security and type safety of the Go programming language! +Package with script-like API for system operation and automation! -The goal of project `gexe` is to make it dead simple to write code that interacts with the OS (and/or other components) with the type safety of the Go programming language. +The goal of project `gexe` is to make it simple to write code for system operation and task automation using a script-like API that offers the security and the type safety of the Go programming language (see [/examples](/examples/)). > NOTE: this project got renamed from Echo to Gexe (see Project Name Change) ## What can you do with `gexe`? -* Parse and execute OS commands provided as plain and clear text as you would in a shell. +* Parse and execute OS plain text commands, as you would in a shell. * Support for variable expansion in command string (i.e. `gexe.Run("echo $HOME")`) +* Ability to pipe processes: `gexe.Pipe("cat /etc/hosts", "wc -l")` +* Run processes concurrently: `gexe.RunConcur('wget https://example.com/files'; "date")` * Get process information (i.e. PID, status, exit code, etc) -* Stream data from stdout while process is executing * Get program information (i.e. args, binary name, working dir, etc) -* Easily read file content into different targets (string, bytes, io.Writer, etc) -* Easily write file content from different sources (i.e. string, bytes, io.Reader, etc) +* Easily read and write file content using different sources (string, bytes, io.Writer, etc) * Integrate with your shell script using `go run` ## Using `gexe` ### Get the package -```bash= +```bash go get github.com/vladimirvivien/gexe ``` ### Run a process The following executes command `echo "Hello World!"` and prints the result: -```go= +```go fmt.Println(gexe.Run(`echo "Hello World!"`)) ``` Alternatively, you can create your own `gexe` session for more control and error hanlding: -```go= +```go g := gexe.New() proc := g.RunProc(`echo "Hello World"`) if proc.Err() != nil { diff --git a/vendor/github.com/vladimirvivien/gexe/TODO.md b/vendor/github.com/vladimirvivien/gexe/TODO.md index ec28d145d..0d9bb6c47 100644 --- a/vendor/github.com/vladimirvivien/gexe/TODO.md +++ b/vendor/github.com/vladimirvivien/gexe/TODO.md @@ -1,9 +1,10 @@ -# Upcoming Tasks -The following are high-level tasks that will be applied for upcoming release cycles. +# Important tasks +The following are high-level tasks that will be considered for upcoming release cycles. 
-* [ ] Introduce Windows support (#27) -* [ ] Support for scatter/gather exec commands -* [ ] Support for concurrent exec of os commands -* [ ] Map program flags (#20) -* [ ] Piping/chaining OS exec commands (#29) -* [ ] Apply code quality changes +* [ ] Support and test on Windows platform (#27) +* [ ] Ability to map and access program flags (#20) +* [ ] Provide a simple API to submit HTTP requests and retrieve HTTP documents (think of wget/curl) +* [x] Support for scatter/gather exec commands +* [x] Support for concurrent exec of os commands +* [x] Piping/chaining OS exec commands (#29) +* [x] Apply code quality changes \ No newline at end of file diff --git a/vendor/github.com/vladimirvivien/gexe/exec/builder.go b/vendor/github.com/vladimirvivien/gexe/exec/builder.go index 8b726ea73..93709dcde 100644 --- a/vendor/github.com/vladimirvivien/gexe/exec/builder.go +++ b/vendor/github.com/vladimirvivien/gexe/exec/builder.go @@ -7,20 +7,52 @@ import ( type CommandPolicy byte const ( - CmdOnErrContinue CommandPolicy = 1 << iota - CmdOnErrExit - CmdExecSerial - CmdExecConcurrent - CmdExecPipe + ExitOnErrPolicy CommandPolicy = 1 << iota + ConcurrentExecPolicy ) -type CommandProcs struct { - procs []*Proc +// CommandResult stores results of executed commands using the CommandBuilder +type CommandResult struct { + mu sync.RWMutex + workChan chan *Proc + procs []*Proc + errProcs []*Proc } + +func (cr *CommandResult) Procs() []*Proc { + cr.mu.RLock() + defer cr.mu.RUnlock() + return cr.procs +} +func (cr *CommandResult) ErrProcs() []*Proc { + cr.mu.RLock() + defer cr.mu.RUnlock() + return cr.errProcs +} + +type PipedCommandResult struct { + procs []*Proc + errProcs []*Proc + lastProc *Proc +} + +func (cr *PipedCommandResult) Procs() []*Proc { + return cr.procs +} +func (cr *PipedCommandResult) ErrProcs() []*Proc { + return cr.errProcs +} +func (cr *PipedCommandResult) LastProc() *Proc { + procLen := len(cr.procs) + if procLen == 0 { + return nil + } + return cr.procs[procLen-1] +} + type CommandBuilder struct { cmdPolicy CommandPolicy procs []*Proc - procChan chan *Proc } // Commands creates a *CommandBuilder used to collect @@ -47,97 +79,162 @@ func (cb *CommandBuilder) Add(cmds ...string) *CommandBuilder { return cb } -// Run is a shortcut for executing the procs serially: -// -// cb.WithPolicy(CmdOnErrContinue).Start().Wait() -// -func (cb *CommandBuilder) Run() CommandProcs { - return cb.WithPolicy(CmdOnErrContinue).Start().Wait() -} +// Run executes all commands successively and waits for all of the result. The result of each individual +// command can be accessed from CommandResult.Procs[] after the execution completes. If policy == ExitOnErrPolicy, the +// execution will stop on the first error encountered, otherwise it will continue. Processes with errors can be accessed +// from CommandResult.ErrProcs. +func (cb *CommandBuilder) Run() *CommandResult { + var result CommandResult + for _, p := range cb.procs { + result.procs = append(result.procs, p) + if err := cb.runCommand(p); err != nil { + result.errProcs = append(result.errProcs, p) + if hasPolicy(cb.cmdPolicy, ExitOnErrPolicy) { + break + } + continue + } + } -// ConcurRun is a shortcut for executing procs concurrently: -// -// cb.WithPolicy(CmdExecConcurrent).Start().Wait() -// -func (cb *CommandBuilder) ConcurRun() CommandProcs { - return cb.WithPolicy(CmdOnErrContinue | CmdExecConcurrent).Start().Wait() + return &result } -// Start starts running the registered procs serially and returns immediately. 
-// This should be followed by a call to Wait to retrieve results. -func (cb *CommandBuilder) Start() *CommandBuilder { - if len(cb.procs) == 0 { - return cb - } - - cb.procChan = make(chan *Proc, len(cb.procs)) - switch { - case hasPolicy(cb.cmdPolicy, CmdExecConcurrent): - // launch each command in its own goroutine - go func() { - defer close(cb.procChan) +// Start starts all processes sequentially by default, or concurrently if ConcurrentExecPolicy is set, and does not wait for the commands +// to complete. Use CommandResult.Wait to wait for the processes to complete. Then, the result of each command can be accessed +// from CommandResult.Procs[] or CommandResult.ErrProcs to access failed processses. If policy == ExitOnErrPolicy, the execution will halt +// on the first error encountered, otherwise it will continue. +func (cb *CommandBuilder) Start() *CommandResult { + result := &CommandResult{workChan: make(chan *Proc, len(cb.procs))} + go func(builder *CommandBuilder, cr *CommandResult) { + defer close(cr.workChan) + + // start with concurrently + if hasPolicy(builder.cmdPolicy, ConcurrentExecPolicy) { var gate sync.WaitGroup - for _, proc := range cb.procs { + for _, proc := range builder.procs { + cr.mu.Lock() + cr.procs = append(cr.procs, proc) + cr.mu.Unlock() + proc.cmd.Stdout = proc.result + proc.cmd.Stderr = proc.result + gate.Add(1) - go func(wg *sync.WaitGroup, ch chan<- *Proc, p *Proc) { - defer wg.Done() - ch <- p.Start() - }(&gate, cb.procChan, proc) + go func(conProc *Proc, conResult *CommandResult) { + conResult.mu.Lock() + defer conResult.mu.Unlock() + defer gate.Done() + if err := conProc.Start().Err(); err != nil { + cr.errProcs = append(cr.errProcs, conProc) + return + } + conResult.workChan <- conProc + }(proc, cr) } - // wait for procs to launch gate.Wait() - }() - - case hasPolicy(cb.cmdPolicy, CmdExecPipe): - // pipe successive commands serially - go func(ch chan<- *Proc) { - defer close(cb.procChan) - if len(cb.procs) == 1 { - ch <- cb.procs[0].Start() - return - } - }(cb.procChan) - default: - // launch all procs (serially), return immediately - go func(ch chan<- *Proc) { - defer close(cb.procChan) - for _, proc := range cb.procs { - ch <- proc.Start() + return + } + + // start sequentially + for _, proc := range builder.procs { + cr.mu.Lock() + cr.procs = append(cr.procs, proc) + cr.mu.Unlock() + proc.cmd.Stdout = proc.result + proc.cmd.Stderr = proc.result + + // start sequentially + if err := proc.Start().Err(); err != nil { + cr.mu.Lock() + cr.errProcs = append(cr.errProcs, proc) + cr.mu.Unlock() + if hasPolicy(builder.cmdPolicy, ExitOnErrPolicy) { + break + } + continue } - }(cb.procChan) - } - return cb + + cr.workChan <- proc + } + }(cb, result) + + return result } -func (cb *CommandBuilder) Wait() CommandProcs { - if len(cb.procs) == 0 || cb.procChan == nil { - return CommandProcs{procs: []*Proc{}} +// Concurr starts all processes concurrently and does not wait for the commands +// to complete. It is equivalent to Commands(...).WithPolicy(ConcurrentExecPolicy).Start(). +func (cb *CommandBuilder) Concurr() *CommandResult { + cb.cmdPolicy = ConcurrentExecPolicy + return cb.Start() +} + +// Pipe executes each command serially chaining the combinedOutput of previous command to the inputPipe of next command. 
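Under the reworked builder, `Run`, `Start`, `Concurr`, and `Pipe` return result types (`CommandResult`, `PipedCommandResult`) instead of the old `CommandProcs`. A sketch of the sequential and piped flows, using throwaway shell commands purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/vladimirvivien/gexe/exec"
)

func main() {
	// Sequential execution: results (and failures) are collected per process.
	res := exec.Commands("go version", "go env GOOS").Run()
	for _, p := range res.Procs() {
		fmt.Println(p.Result())
	}
	if len(res.ErrProcs()) > 0 {
		fmt.Println("some commands failed")
	}

	// Piped execution: stdout of each command feeds stdin of the next.
	piped := exec.Commands("echo one two three", "wc -w").Pipe()
	if last := piped.LastProc(); last != nil {
		fmt.Println("word count:", last.Result())
	}
}
```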
+func (cb *CommandBuilder) Pipe() *PipedCommandResult { + var result PipedCommandResult + procLen := len(cb.procs) + if procLen == 0 { + return nil } - var result CommandProcs - for proc := range cb.procChan { - result.procs = append(result.procs, proc) + last := procLen - 1 + result.lastProc = cb.procs[last] + result.lastProc.cmd.Stdout = result.lastProc.result + result.lastProc.cmd.Stderr = result.lastProc.result + + if procLen > 1 { + result.lastProc.cmd.Stdout = result.lastProc.result + // connect input/output between commands + for i, p := range cb.procs[:last] { + // link proc.Output to proc[next].Input + cb.procs[i+1].SetStdin(p.GetOutputPipe()) + p.cmd.Stderr = p.result + } + } - // check for start err - if proc.Err() != nil { - if hasPolicy(cb.cmdPolicy, CmdOnErrExit) { - break - } + // start each process (but, not wait for result) + // to ensure data flow between successive processes start + for _, p := range cb.procs { + result.procs = append(result.procs, p) + if err := p.Start().Err(); err != nil { + result.errProcs = append(result.errProcs, p) + return &result + } + } + + // wait and access processes result + for _, p := range cb.procs { + if err := p.Wait().Err(); err != nil { + result.errProcs = append(result.errProcs, p) + break } + } - // wait for command to complete + return &result +} + +func (cp *CommandBuilder) runCommand(proc *Proc) error { + // setup combined output for reach proc + proc.cmd.Stdout = proc.result + proc.cmd.Stderr = proc.result + + if err := proc.Start().Err(); err != nil { + return err + } + + if err := proc.Wait().Err(); err != nil { + return err + } + return nil +} + +func (cr *CommandResult) Wait() *CommandResult { + for proc := range cr.workChan { if err := proc.Wait().Err(); err != nil { - if hasPolicy(cb.cmdPolicy, CmdOnErrExit) { - break - } + cr.errProcs = append(cr.errProcs, proc) } } - return result + return cr } func hasPolicy(mask, pol CommandPolicy) bool { return (mask & pol) != 0 } - -// TODO - add termination methods -// - Pipe() - Runs each command, piping result of prev command into std input of next command diff --git a/vendor/github.com/vladimirvivien/gexe/exec/cmd_parser.go b/vendor/github.com/vladimirvivien/gexe/exec/cmd_parser.go index f6ddc68e5..3f51bd63e 100644 --- a/vendor/github.com/vladimirvivien/gexe/exec/cmd_parser.go +++ b/vendor/github.com/vladimirvivien/gexe/exec/cmd_parser.go @@ -9,11 +9,11 @@ import ( // parse parses space-separated string into words including quoted words: // -// aaa "bbb" "ccc ddd" '"eee ff"' +// aaa "bbb" "ccc ddd" '"eee ff"' // -// NB: -// - aaa"abcd": returns whole string. -// - "aaa"bbb: returns two words "aaa" and "bbb" +// NB: +// - aaa"abcd": returns whole string. +// - "aaa"bbb: returns two words "aaa" and "bbb" func parse(val string) ([]string, error) { rdr := bufio.NewReader(strings.NewReader(val)) var startQuote rune diff --git a/vendor/github.com/vladimirvivien/gexe/exec/proc.go b/vendor/github.com/vladimirvivien/gexe/exec/proc.go index 71f5368f0..00899a43f 100644 --- a/vendor/github.com/vladimirvivien/gexe/exec/proc.go +++ b/vendor/github.com/vladimirvivien/gexe/exec/proc.go @@ -16,9 +16,9 @@ type Proc struct { err error state *os.ProcessState result *bytes.Buffer - output io.Reader - stdoutPipe io.ReadCloser - stderrPipe io.ReadCloser + outputPipe io.ReadCloser + errorPipe io.ReadCloser + inputPipe io.WriteCloser cmd *osexec.Cmd process *os.Process } @@ -34,70 +34,65 @@ func NewProc(cmdStr string) *Proc { command := osexec.Command(words[0], words[1:]...) 
pipeout, outerr := command.StdoutPipe() pipeerr, errerr := command.StderrPipe() - output := io.MultiReader(pipeout, pipeerr) + //output := io.MultiReader(pipeout, pipeerr) if outerr != nil || errerr != nil { - return &Proc{err: fmt.Errorf("%s; %s", outerr, errerr)} + return &Proc{err: fmt.Errorf("combinedOutput pipe: %s; %s", outerr, errerr)} + } + + pipein, inerr := command.StdinPipe() + if inerr != nil { + return &Proc{err: fmt.Errorf("inputPipe err: %w", inerr)} } return &Proc{ cmd: command, - stdoutPipe: pipeout, - stderrPipe: pipeerr, - output: output, + outputPipe: pipeout, + errorPipe: pipeerr, + inputPipe: pipein, + result: new(bytes.Buffer), } } -// StartProc sets up and starts an OS process, but does not wait for -// it to complete (use proc.Wait for that) +// StartProc starts an OS process (setup a combined output of stdout, stderr) and does not wait for +// it to complete. You must follow this with either proc.Wait() to wait for result directly. Otherwise, +// call proc.Out() or proc.Result() which automatically waits and gather result. func StartProc(cmdStr string) *Proc { proc := NewProc(cmdStr) if proc.Err() != nil { return proc } + proc.cmd.Stdout = proc.result + proc.cmd.Stderr = proc.result return proc.Start() } -// RunProc creates, runs, and waits for a process to complete and -// return *Proc with result info. This call must be followed by -// Proc.Result() to access command result as a string value. -// NOTE: using proc.Out(), after this call, will be empty. +// RunProc starts a new process and waits for its completion. Use Proc.Out() or Proc.Result() +// to access the combined result from stdout and stderr. func RunProc(cmdStr string) *Proc { proc := StartProc(cmdStr) if proc.Err() != nil { - proc.err = fmt.Errorf("proc: runproc: %s", proc.Err()) - return proc - } - - proc.result = new(bytes.Buffer) - - if _, err := proc.result.ReadFrom(proc.output); err != nil { - proc.err = err return proc } - - if err := proc.Wait().Err(); err != nil { - proc.err = fmt.Errorf("proc: error: %s", proc.Err()) - } - + proc.Out() return proc } -// Run creates and runs a process and returns the -// result as a string. -// Equivalent to: Proc.RunProc() -> Proc.Result() +// Run creates and runs a process and waits for its result (combined stdin,stderr) returned as a string value. +// This is equivalent to calling Proc.RunProc() followed by Proc.Result(). func Run(cmdStr string) (result string) { - proc := StartProc(cmdStr) - if proc.Err() != nil { - proc.err = fmt.Errorf("proc: run: %s", proc.Err()) - return - } - return proc.Result() + return RunProc(cmdStr).Result() } -// Start starts the associated command as an OS process -// Errors can be accessed using p.Err() +// Start starts the associated command as an OS process and does not wait for its result. +// Ensure proper access to the process' input/output (stdin,stdout,stderr) has been +// setup prior to calling p.Start(). +// Use p.Err() to access any error that may have occured during execution. func (p *Proc) Start() *Proc { + if p.hasStarted() { + return p + } + if p.cmd == nil { p.err = fmt.Errorf("cmd is nill") return p @@ -115,7 +110,7 @@ func (p *Proc) Start() *Proc { return p } -// Commands returns the os/exec.Cmd that started the process +// Command returns the os/exec.Cmd that started the process func (p *Proc) Command() *osexec.Cmd { return p.cmd } @@ -126,9 +121,8 @@ func (p *Proc) Peek() *Proc { return p } -// Wait waits for a process started with Proc.StartProc to complete (in a separate goroutine). 
-// Once process completes, Wait cleans up resources. -// Must be called after Proc.StartProc() +// Wait waits for a process to complete (in a separate goroutine). +// Ensure p.Start() has been called prior to calling p.Wait() func (p *Proc) Wait() *Proc { if p.cmd == nil { p.err = fmt.Errorf("command is nill") @@ -141,6 +135,15 @@ func (p *Proc) Wait() *Proc { return p.Peek() } +// Run starts and wait for a process to complete. +// Before calling p.Run(), setup proper access to the process' input/output (i.e. stdin,stdout, stderr) +func (p *Proc) Run() *Proc { + if p.Start().Err() != nil { + return p + } + return p.Wait() +} + // ID returns process id func (p *Proc) ID() int { return p.id @@ -199,54 +202,81 @@ func (p *Proc) Kill() *Proc { return p } -// StdOut is an io.Reader pipe for standard out -// Must be streamed before Proc.Wait() -func (p *Proc) StdOut() io.Reader { - return p.stdoutPipe -} - -// StdErr is an io.Reader pipe for standard error -// Must be streamed before Proc.Wait(). -func (p *Proc) StdErr() io.Reader { - return p.stdoutPipe -} - -// Out waits for cmd result and surfaces the result from both stdout and stderr -// in an io.Reader that can be streamed. -// NOTE: Must be called after Proc.StartProc +// Out waits, after StartProc or Proc.Start has been called, for the cmd to complete +// and returns the combined result (Stdout and Stderr) as a single reader to be streamed. func (p *Proc) Out() io.Reader { - if p.output != nil { - return p.output + if !p.hasStarted() { + p.cmd.Stdout = p.result + p.cmd.Stderr = p.result + if err := p.Start().Err(); err != nil { + return strings.NewReader(fmt.Sprintf("proc: out failed: %s", err)) + } } - p.output = io.MultiReader(p.stdoutPipe, p.stderrPipe) - if err := p.Wait().Err(); err != nil { - p.err = fmt.Errorf("proc: out: failed to wait: %s", p.Err()) - return nil + if !p.Exited() { + if err := p.Wait().Err(); err != nil { + return strings.NewReader(fmt.Sprintf("proc: out: failed to wait: %s", err)) + } } - return p.output + return p.result } -// Result waits and copies the result of the combined stdout and stderr -// and returns its result as a string. -// NOTE: Must be called after Proc.StartProc, Proc.RunProc() or Proc.Run(). +// Result waits, after proc.Start or proc.StartProc has been called, for the cmd to complete +// and returns the combined stdout and stderr result as a string value. 
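With the combined-output plumbing above, `Out` and `Result` start and wait for the process on demand. A brief sketch of the start/wait/result flow; the commands are placeholders:

```go
package main

import (
	"fmt"

	"github.com/vladimirvivien/gexe/exec"
)

func main() {
	// Start without waiting, then wait and read the combined stdout/stderr.
	p := exec.StartProc("go version")
	if p.Err() != nil {
		fmt.Println("start failed:", p.Err())
		return
	}
	fmt.Println(p.Wait().Result())

	// Or let Run start, wait, and return the trimmed result in one call.
	fmt.Println(exec.Run("go env GOPATH"))
}
```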
func (p *Proc) Result() string { - if p.result != nil { - return strings.TrimSpace(p.result.String()) + p.Out() + if p.Err() != nil { + return p.Err().Error() } + return strings.TrimSpace(p.result.String()) +} - p.result = new(bytes.Buffer) +// Stdin returns the standard input stream for the process +func (p *Proc) Stdin() io.Reader { + return p.cmd.Stdin +} - if _, err := p.result.ReadFrom(p.output); err != nil { - p.err = err - return "" - } +// SetStdin sets a stream for the process to read its input from +func (p *Proc) SetStdin(in io.Reader) { + p.cmd.Stdin = in +} - if err := p.Wait().Err(); err != nil { - p.err = fmt.Errorf("proc: result: %s", p.Err()) - return "" - } +// GetInputPipe returns a stream where the process input can be written to +func (p *Proc) GetInputPipe() io.Writer { + return p.inputPipe +} - return strings.TrimSpace(p.result.String()) +// Stdout returns the standard output stream for the process +func (p *Proc) Stdout() io.Writer { + return p.cmd.Stdout +} + +// SetStdout sets a stream where the process can write its output to +func (p *Proc) SetStdout(out io.Writer) { + p.cmd.Stdout = out +} + +// GetOutputPipe returns a stream where the process output can be read from +func (p *Proc) GetOutputPipe() io.Reader { + return p.outputPipe +} + +// Stderr returns the standard error stream for the process +func (p *Proc) Stderr() io.Writer { + return p.cmd.Stderr +} + +// SetStderr sets a stream where the process can write its errors to +func (p *Proc) SetStderr(out io.Writer) { + p.cmd.Stderr = out +} + +// GetErrorPipe returns a stream where the process error can be read from +func (p *Proc) GetErrorPipe() io.Reader { + return p.errorPipe +} + +func (p *Proc) hasStarted() bool { + return (p.cmd.Process != nil && p.cmd.Process.Pid != 0) } diff --git a/vendor/github.com/vladimirvivien/gexe/functions.go b/vendor/github.com/vladimirvivien/gexe/functions.go index a43b98aab..7f76889c0 100644 --- a/vendor/github.com/vladimirvivien/gexe/functions.go +++ b/vendor/github.com/vladimirvivien/gexe/functions.go @@ -3,6 +3,7 @@ package gexe import ( "github.com/vladimirvivien/gexe/exec" "github.com/vladimirvivien/gexe/fs" + "github.com/vladimirvivien/gexe/http" "github.com/vladimirvivien/gexe/prog" "github.com/vladimirvivien/gexe/vars" ) @@ -15,7 +16,7 @@ func Variables() *vars.Variables { // Envs declares environment variables using // a multi-line space-separated list: // -// Envs("GOOS=linux GOARCH=amd64") +// Envs("GOOS=linux GOARCH=amd64") // // Environment vars can be used in string values // using Eval("building for os=$GOOS") @@ -31,7 +32,7 @@ func SetEnv(name, value string) *Echo { // Vars declares session-scope variables using // a multi-line space-separated list: // -// Envs("foo=bar platform=amd64") +// Envs("foo=bar platform=amd64") // // Session vars can be used in string values // using Eval("My foo=$foo"). @@ -59,6 +60,12 @@ func Eval(str string) string { return DefaultEcho.Eval(str) } +// NewProc setups a new process with specified command cmdStr and returns immediately +// without starting. Information about the running process is stored in *exec.Proc. +func NewProc(cmdStr string) *exec.Proc { + return DefaultEcho.NewProc(cmdStr) +} + // StartProc executes the command in cmdStr and returns immediately // without waiting. Information about the running process is stored in *exec.Proc. 
func StartProc(cmdStr string) *exec.Proc { @@ -81,6 +88,41 @@ func Runout(cmdStr string) { DefaultEcho.Runout(cmdStr) } +// Commands returns a *exe.CommandBuilder to build a multi-command execution flow. +func Commands(cmdStrs ...string) *exec.CommandBuilder { + return DefaultEcho.Commands(cmdStrs...) +} + +// StartAll starts the exection of each command sequentially and +// does not wait for their completion. +func StartAll(cmdStrs ...string) *exec.CommandResult { + return DefaultEcho.StartAll(cmdStrs...) +} + +// RunAll executes each command, in cmdStrs, successively and wait for their +// completion. +func RunAll(cmdStrs ...string) *exec.CommandResult { + return DefaultEcho.RunAll(cmdStrs...) +} + +// StartConcur starts the exection of each command concurrently and +// does not wait for their completion. +func StartConcur(cmdStrs ...string) *exec.CommandResult { + return DefaultEcho.StartConcur(cmdStrs...) +} + +// RunConcur executes each command, in cmdStrs, concurrently and waits +// their completion. +func RunConcur(cmdStrs ...string) *exec.CommandResult { + return DefaultEcho.RunConcur(cmdStrs...) +} + +// Pipe executes each command, in cmdStrs, by piping the result +// of the previous command as input to the next command until done. +func Pipe(cmdStrs ...string) *exec.PipedCommandResult { + return DefaultEcho.Pipe(cmdStrs...) +} + // Read creates an fs.FileReader that // can be used to read content from files. func Read(path string) fs.FileReader { @@ -93,6 +135,16 @@ func Write(path string) fs.FileWriter { return DefaultEcho.Write(path) } +// GetUrl creates a *http.ResourceReader to retrieve HTTP content +func GetUrl(url string) *http.ResourceReader { + return DefaultEcho.Get(url) +} + +// PostUrl creates a *http.ResourceWriter to write content to an HTTP server +func PostUrl(url string) *http.ResourceWriter { + return DefaultEcho.Post(url) +} + // Prog returns program information via *prog.Info func Prog() *prog.Info { return DefaultEcho.Prog() diff --git a/vendor/github.com/vladimirvivien/gexe/http.go b/vendor/github.com/vladimirvivien/gexe/http.go new file mode 100644 index 000000000..a22406ee3 --- /dev/null +++ b/vendor/github.com/vladimirvivien/gexe/http.go @@ -0,0 +1,13 @@ +package gexe + +import "github.com/vladimirvivien/gexe/http" + +// Get creates a *http.ResourceReader to read resource content from HTTP server +func (e *Echo) Get(url string) *http.ResourceReader { + return http.Get(url) +} + +// Post creates a *http.ResourceWriter to write content to an HTTP server +func (e *Echo) Post(url string) *http.ResourceWriter { + return http.Post(url) +} diff --git a/vendor/github.com/vladimirvivien/gexe/http/http_reader.go b/vendor/github.com/vladimirvivien/gexe/http/http_reader.go new file mode 100644 index 000000000..16653bdf1 --- /dev/null +++ b/vendor/github.com/vladimirvivien/gexe/http/http_reader.go @@ -0,0 +1,84 @@ +package http + +import ( + "io" + "net/http" +) + +// ResourceReader provides types and methods to read content of resources from a server using HTTP +type ResourceReader struct { + client *http.Client + err error + url string + res *Response +} + +// Get initiates a "GET" operation for the specified resource +func Get(url string) *ResourceReader { + return &ResourceReader{url: url, client: &http.Client{}} +} + +// Err returns the last known error +func (r *ResourceReader) Err() error { + return r.err +} + +// Response returns the server's response info +func (r *ResourceReader) Response() *Response { + return r.res +} + +// Bytes returns the server response 
as a []byte +func (b *ResourceReader) Bytes() []byte { + if err := b.Do().Err(); err != nil { + b.err = err + return nil + } + return b.read() +} + +// String returns the server response as a string +func (b *ResourceReader) String() string { + if err := b.Do().Err(); err != nil { + b.err = err + return "" + } + return string(b.read()) +} + +// Body returns an io.ReadCloser to stream the server response. +// NOTE: ensure to close the stream when finished. +func (r *ResourceReader) Body() io.ReadCloser { + if err := r.Do().Err(); err != nil { + r.err = err + return nil + } + return r.res.body +} + +// Do invokes the client.Get to "GET" the content from server +func (r *ResourceReader) Do() *ResourceReader { + res, err := r.client.Get(r.url) + if err != nil { + r.err = err + r.res = &Response{} + return r + } + r.res = &Response{stat: res.Status, statCode: res.StatusCode, body: res.Body} + return r +} + +// read reads the content of the response body and returns a []byte +func (r *ResourceReader) read() []byte { + if r.res.body == nil { + return nil + } + + data, err := io.ReadAll(r.res.body) + defer r.res.body.Close() + if err != nil { + r.err = err + return nil + } + return data +} diff --git a/vendor/github.com/vladimirvivien/gexe/http/http_response.go b/vendor/github.com/vladimirvivien/gexe/http/http_response.go new file mode 100644 index 000000000..adf2de977 --- /dev/null +++ b/vendor/github.com/vladimirvivien/gexe/http/http_response.go @@ -0,0 +1,26 @@ +package http + +import "io" + +// Response stores high level metadata and responses from HTTP request results +type Response struct { + stat string + statCode int + body io.ReadCloser +} + +// Status returns the standard lib http.Response.Status value from the server +func (res *Response) Status() string { + return res.stat +} + +// StatusCode returns the standard lib http.Response.StatusCode value from the server +func (res *Response) StatusCode() int { + return res.statCode +} + +// Body is io.ReadCloser stream to the content from serve. +// NOTE: ensure to call Close() if used directly. +func (res *Response) Body() io.ReadCloser { + return res.body +} diff --git a/vendor/github.com/vladimirvivien/gexe/http/http_writer.go b/vendor/github.com/vladimirvivien/gexe/http/http_writer.go new file mode 100644 index 000000000..c3ea4acc2 --- /dev/null +++ b/vendor/github.com/vladimirvivien/gexe/http/http_writer.go @@ -0,0 +1,98 @@ +package http + +import ( + "bytes" + "io" + "net/http" + "net/url" + "strings" +) + +// ResourceWriter represents types and methods used to post resource data to an HTTP server +type ResourceWriter struct { + client *http.Client + err error + url string + headers http.Header + data io.Reader + res *Response +} + +// Post starts a "POST" HTTP operation to the provided resource. +func Post(resource string) *ResourceWriter { + return &ResourceWriter{url: resource, client: &http.Client{}, headers: make(http.Header)} +} + +// Err returns the last known error for the post operation +func (w *ResourceWriter) Err() error { + return w.err +} + +// Do is a terminal method that completes the post request of data to the HTTP server. 
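A short sketch of the new HTTP helpers, `Get` and `Post`; the URLs are placeholders and the header value is only an example:

```go
package main

import (
	"fmt"

	gexehttp "github.com/vladimirvivien/gexe/http"
)

func main() {
	// GET: read the whole response body as a string (placeholder URL).
	r := gexehttp.Get("https://example.com")
	body := r.String()
	if r.Err() != nil {
		fmt.Println("get failed:", r.Err())
		return
	}
	fmt.Println("status:", r.Response().StatusCode(), "bytes:", len(body))

	// POST: send a small text payload with an explicit content type
	// (the URL is a placeholder).
	w := gexehttp.Post("https://example.com/upload").
		SetHeader("Content-Type", "text/plain").
		String("hello from gexe")
	if w.Err() != nil {
		fmt.Println("post failed:", w.Err())
	}
}
```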
+func (w *ResourceWriter) Do() *ResourceWriter { + req, err := http.NewRequest("POST", w.url, w.data) + if err != nil { + w.err = err + w.res = &Response{} + return w + } + + // set headers + req.Header = w.headers + + // post request + res, err := w.client.Do(req) + if err != nil { + w.err = err + w.res = &Response{} + return w + } + + w.res = &Response{stat: res.Status, statCode: res.StatusCode, body: res.Body} + + return w +} + +// WithHeaders sets all headers for the post operation +func (w *ResourceWriter) WithHeaders(h http.Header) *ResourceWriter { + w.headers = h + return w +} + +// AddHeader is a convenience method to add a single header +func (w *ResourceWriter) AddHeader(key, value string) *ResourceWriter { + w.headers.Add(key, value) + return w +} + +// SetHeader is a convenience method to sets a specific header +func (w *ResourceWriter) SetHeader(key, value string) *ResourceWriter { + w.headers.Set(key, value) + return w +} + +// String posts the string value as content to the server +func (w *ResourceWriter) String(val string) *ResourceWriter { + w.data = strings.NewReader(val) + return w.Do() +} + +// Bytes posts the slice of bytes as content to the server +func (w *ResourceWriter) Bytes(val []byte) *ResourceWriter { + w.data = bytes.NewReader(val) + return w.Do() +} + +// Body provides an io reader to stream content to the server +func (w *ResourceWriter) Body(val io.Reader) *ResourceWriter { + w.data = val + return w.Do() +} + +// FormData posts form-encoded data as content to the server +func (w *ResourceWriter) FormData(val map[string][]string) *ResourceWriter { + w.SetHeader("Content-Type", "application/x-www-form-urlencoded") + formData := url.Values(val) + w.data = strings.NewReader(formData.Encode()) + return w.Do() +} diff --git a/vendor/github.com/vladimirvivien/gexe/procs.go b/vendor/github.com/vladimirvivien/gexe/procs.go index 7378b51e2..251e579ff 100644 --- a/vendor/github.com/vladimirvivien/gexe/procs.go +++ b/vendor/github.com/vladimirvivien/gexe/procs.go @@ -6,8 +6,16 @@ import ( "github.com/vladimirvivien/gexe/exec" ) +// NewProc setups a new process with specified command cmdStr and returns immediately +// without starting. Use Proc.Wait to wait for exection and then retrieve process result. +// Information about the running process is stored in *exec.Proc. +func (e *Echo) NewProc(cmdStr string) *exec.Proc { + return exec.NewProc(cmdStr) +} + // StartProc executes the command in cmdStr and returns immediately -// without waiting. Information about the running process is stored in *Proc. +// without waiting. Use Proc.Wait to wait for exection and then retrieve process result. +// Information about the running process is stored in *Proc. func (e *Echo) StartProc(cmdStr string) *exec.Proc { return exec.StartProc(e.Eval(cmdStr)) } @@ -27,3 +35,55 @@ func (e *Echo) Run(cmdStr string) string { func (e *Echo) Runout(cmdStr string) { fmt.Print(e.Run(cmdStr)) } + +// Commands returns a *exe.CommandBuilder to build a multi-command execution flow. +func (e *Echo) Commands(cmdStrs ...string) *exec.CommandBuilder { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...) +} + +// StartAll starts the sequential execution of each command, in cmdStrs, and does not +// wait for their completion. 
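The same builders are surfaced as package-level functions on the default `gexe` session. A hedged sketch, assuming `Run` and friends expand session variables as documented above; the commands and URL are placeholders:

```go
package main

import (
	"fmt"

	"github.com/vladimirvivien/gexe"
)

func main() {
	// Session variables are expanded in command strings before execution.
	gexe.Vars("greeting=hello")
	fmt.Println(gexe.Run("echo $greeting world"))

	// Run several commands concurrently and wait for all of them.
	res := gexe.RunConcur("go version", "go env GOOS")
	fmt.Println("failed commands:", len(res.ErrProcs()))

	// Pipe commands at the package level.
	piped := gexe.Pipe("echo a b c", "wc -w")
	if last := piped.LastProc(); last != nil {
		fmt.Println("word count:", last.Result())
	}

	// Fetch a document over HTTP (placeholder URL).
	fmt.Println(len(gexe.GetUrl("https://example.com").String()), "bytes fetched")
}
```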
+func (e *Echo) StartAll(cmdStrs ...string) *exec.CommandResult { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...).Start() +} + +// RunAll executes each command sequentially, in cmdStrs, and wait for their completion. +func (e *Echo) RunAll(cmdStrs ...string) *exec.CommandResult { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...).Run() +} + +// StartConcur starts the concurrent execution of each command, in cmdStrs, and does not +// wait for their completion. +func (e *Echo) StartConcur(cmdStrs ...string) *exec.CommandResult { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...).Concurr() +} + +// RunConcur executes each command concurrently, in cmdStrs, and waits +// their completion. +func (e *Echo) RunConcur(cmdStrs ...string) *exec.CommandResult { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...).Concurr().Wait() +} + +// Pipe executes each command, in cmdStrs, by piping the result +// of the previous command as input to the next command until done. +func (e *Echo) Pipe(cmdStrs ...string) *exec.PipedCommandResult { + for i, cmd := range cmdStrs { + cmdStrs[i] = e.Eval(cmd) + } + return exec.Commands(cmdStrs...).Pipe() +} diff --git a/vendor/github.com/vladimirvivien/gexe/variables.go b/vendor/github.com/vladimirvivien/gexe/variables.go index d8987ba25..54230ee0a 100644 --- a/vendor/github.com/vladimirvivien/gexe/variables.go +++ b/vendor/github.com/vladimirvivien/gexe/variables.go @@ -12,7 +12,7 @@ func (e *Echo) Variables() *vars.Variables { // Envs declares environment variables using // a multi-line space-separated list: // -// Envs("GOOS=linux GOARCH=amd64") +// Envs("GOOS=linux GOARCH=amd64") // // Environment vars can be used in string values // using Eval("building for os=$GOOS") @@ -32,7 +32,7 @@ func (e *Echo) SetEnv(name, value string) *Echo { // Vars declares session-scope variables using // a multi-line space-separated list: // -// Envs("foo=bar platform=amd64") +// Envs("foo=bar platform=amd64") // // Session vars can be used in string values // using Eval("My foo=$foo"). diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go new file mode 100644 index 000000000..c3bd9a1ee --- /dev/null +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpproxy provides support for HTTP proxy determination +// based on environment variables, as provided by net/http's +// ProxyFromEnvironment function. +// +// The API is not subject to the Go 1 compatibility promise and may change at +// any time. +package httpproxy + +import ( + "errors" + "fmt" + "net" + "net/url" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +// Config holds configuration for HTTP proxy settings. See +// FromEnvironment for details. +type Config struct { + // HTTPProxy represents the value of the HTTP_PROXY or + // http_proxy environment variable. It will be used as the proxy + // URL for HTTP requests unless overridden by NoProxy. + HTTPProxy string + + // HTTPSProxy represents the HTTPS_PROXY or https_proxy + // environment variable. It will be used as the proxy URL for + // HTTPS requests unless overridden by NoProxy. 
+ HTTPSProxy string + + // NoProxy represents the NO_PROXY or no_proxy environment + // variable. It specifies a string that contains comma-separated values + // specifying hosts that should be excluded from proxying. Each value is + // represented by an IP address prefix (1.2.3.4), an IP address prefix in + // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). + // An IP address prefix and domain name can also include a literal port + // number (1.2.3.4:80). + // A domain name matches that name and all subdomains. A domain name with + // a leading "." matches subdomains only. For example "foo.com" matches + // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". + // A single asterisk (*) indicates that no proxying should be done. + // A best effort is made to parse the string and errors are + // ignored. + NoProxy string + + // CGI holds whether the current process is running + // as a CGI handler (FromEnvironment infers this from the + // presence of a REQUEST_METHOD environment variable). + // When this is set, ProxyForURL will return an error + // when HTTPProxy applies, because a client could be + // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. + CGI bool +} + +// config holds the parsed configuration for HTTP proxy settings. +type config struct { + // Config represents the original configuration as defined above. + Config + + // httpsProxy is the parsed URL of the HTTPSProxy if defined. + httpsProxy *url.URL + + // httpProxy is the parsed URL of the HTTPProxy if defined. + httpProxy *url.URL + + // ipMatchers represent all values in the NoProxy that are IP address + // prefixes or an IP address in CIDR notation. + ipMatchers []matcher + + // domainMatchers represent all values in the NoProxy that are a domain + // name or hostname & domain name + domainMatchers []matcher +} + +// FromEnvironment returns a Config instance populated from the +// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the +// lowercase versions thereof). +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. An error +// is returned if the value is a different form. +func FromEnvironment() *Config { + return &Config{ + HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), + HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), + NoProxy: getEnvAny("NO_PROXY", "no_proxy"), + CGI: os.Getenv("REQUEST_METHOD") != "", + } +} + +func getEnvAny(names ...string) string { + for _, n := range names { + if val := os.Getenv(n); val != "" { + return val + } + } + return "" +} + +// ProxyFunc returns a function that determines the proxy URL to use for +// a given request URL. Changing the contents of cfg will not affect +// proxy functions created earlier. +// +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request, as +// defined by NO_PROXY. +// +// As a special case, if req.URL.Host is "localhost" or a loopback address +// (with or without a port number), then a nil URL and nil error will be returned. +func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { + // Preprocess the Config settings for more efficient evaluation. 
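A small sketch of how `Config.ProxyFunc` is typically consumed, assuming the standard proxy environment variables are set; the request URL is a placeholder:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

func main() {
	// Build a proxy selector from HTTP_PROXY / HTTPS_PROXY / NO_PROXY.
	proxyFn := httpproxy.FromEnvironment().ProxyFunc()

	target, err := url.Parse("https://example.com/index.html") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	proxyURL, err := proxyFn(target)
	if err != nil {
		log.Fatal(err)
	}
	if proxyURL == nil {
		fmt.Println("direct connection (no proxy configured, or host excluded by NO_PROXY)")
	} else {
		fmt.Println("via proxy:", proxyURL)
	}
}
```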
+ cfg1 := &config{ + Config: *cfg, + } + cfg1.init() + return cfg1.proxyForURL +} + +func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { + var proxy *url.URL + if reqURL.Scheme == "https" { + proxy = cfg.httpsProxy + } else if reqURL.Scheme == "http" { + proxy = cfg.httpProxy + if proxy != nil && cfg.CGI { + return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") + } + } + if proxy == nil { + return nil, nil + } + if !cfg.useProxy(canonicalAddr(reqURL)) { + return nil, nil + } + + return proxy, nil +} + +func parseProxy(proxy string) (*url.URL, error) { + if proxy == "" { + return nil, nil + } + + proxyURL, err := url.Parse(proxy) + if err != nil || + (proxyURL.Scheme != "http" && + proxyURL.Scheme != "https" && + proxyURL.Scheme != "socks5") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// useProxy reports whether requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. +func (cfg *config) useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + ip := net.ParseIP(host) + if ip != nil { + if ip.IsLoopback() { + return false + } + } + + addr = strings.ToLower(strings.TrimSpace(host)) + + if ip != nil { + for _, m := range cfg.ipMatchers { + if m.match(addr, port, ip) { + return false + } + } + } + for _, m := range cfg.domainMatchers { + if m.match(addr, port, ip) { + return false + } + } + return true +} + +func (c *config) init() { + if parsed, err := parseProxy(c.HTTPProxy); err == nil { + c.httpProxy = parsed + } + if parsed, err := parseProxy(c.HTTPSProxy); err == nil { + c.httpsProxy = parsed + } + + for _, p := range strings.Split(c.NoProxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + + if p == "*" { + c.ipMatchers = []matcher{allMatch{}} + c.domainMatchers = []matcher{allMatch{}} + return + } + + // IPv4/CIDR, IPv6/CIDR + if _, pnet, err := net.ParseCIDR(p); err == nil { + c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) + continue + } + + // IPv4:port, [IPv6]:port + phost, pport, err := net.SplitHostPort(p) + if err == nil { + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + if phost[0] == '[' && phost[len(phost)-1] == ']' { + phost = phost[1 : len(phost)-1] + } + } else { + phost = p + } + // IPv4, IPv6 + if pip := net.ParseIP(phost); pip != nil { + c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) + continue + } + + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + + // domain.com or domain.com:80 + // foo.com matches bar.foo.com + // .domain.com or .domain.com:port + // *.domain.com or *.domain.com:port + if strings.HasPrefix(phost, "*.") { + phost = phost[1:] + } + matchHost := false + if phost[0] != '.' { + matchHost = true + phost = "." 
+ phost + } + if v, err := idnaASCII(phost); err == nil { + phost = v + } + c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) + } +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(addr, port) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. + // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. + if isASCII(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// matcher represents the matching rule for a given value in the NO_PROXY list +type matcher interface { + // match returns true if the host and optional port or ip and optional port + // are allowed + match(host, port string, ip net.IP) bool +} + +// allMatch matches on all possible inputs +type allMatch struct{} + +func (a allMatch) match(host, port string, ip net.IP) bool { + return true +} + +type cidrMatch struct { + cidr *net.IPNet +} + +func (m cidrMatch) match(host, port string, ip net.IP) bool { + return m.cidr.Contains(ip) +} + +type ipMatch struct { + ip net.IP + port string +} + +func (m ipMatch) match(host, port string, ip net.IP) bool { + if m.ip.Equal(ip) { + return m.port == "" || m.port == port + } + return false +} + +type domainMatch struct { + host string + port string + + matchHost bool +} + +func (m domainMatch) match(host, port string, ip net.IP) bool { + if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { + return m.port == "" || m.port == port + } + return false +} diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 000000000..3d6f516a5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 000000000..84fcc32b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 000000000..811c2e4e9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. 
+type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 000000000..3d66bdef9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 000000000..573fe79e8 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. 
+func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 000000000..9ff4b9a77 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. +func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return forward + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return forward + } + proxy, err := FromURL(proxyURL, forward) + if err != nil { + return forward + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, forward) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5", "socks5h": + addr := u.Hostname() + port := u.Port() + if port == "" { + port = "1080" + } + return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 000000000..c91651f96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go deleted file mode 100644 index dd3febd43..000000000 --- a/vendor/golang.org/x/text/width/kind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package width - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Neutral-0] - _ = x[EastAsianAmbiguous-1] - _ = x[EastAsianWide-2] - _ = x[EastAsianNarrow-3] - _ = x[EastAsianFullwidth-4] - _ = x[EastAsianHalfwidth-5] -} - -const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" - -var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go deleted file mode 100644 index cd9d91caf..000000000 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ /dev/null @@ -1,1329 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "10.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. 
The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. 
-var widthValues = [6464]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, - 0xdb1: 
0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 
0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 
0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 
0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 
0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 
0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 
0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 
0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 
0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, - // Block 0x60, offset 0x1800 - 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, - 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, - 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, - 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, - 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, - 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, - 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, - 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, - 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, - 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, - 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 
0x1929: 0x2000, - 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, - 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, - 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, - 0x193c: 0x2000, 0x193d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 
0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 
0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, - 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. 
-// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - 
{0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go deleted file mode 100644 index 327eaef9b..000000000 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ /dev/null @@ -1,1341 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "11.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. 
-var widthValues = [6464]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 
0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 
0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, - 0x1230: 0x4000, 0x1231: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 
0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 
0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 
0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 
0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, - // Block 0x60, offset 0x1800 - 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, - 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, - 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, - 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, - 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x187a: 0x4000, - 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, - 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 
0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, - 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, - 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, - 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, - 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, - 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, - 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, - 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, - 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, - 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, - 0x193c: 0x2000, 0x193d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 
0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 
0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, - 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 
0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 
0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go deleted file mode 100644 index 5c14ade6d..000000000 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ /dev/null @@ -1,1361 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "12.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. 
- } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 104 blocks, 6656 entries, 13312 bytes -// The third block is the zero block. -var widthValues = [6656]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, - 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, - 0x1236: 0x4000, 0x1237: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 
0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 
0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 
0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 
0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d5: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, - // Block 0x60, offset 0x1800 - 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, - // Block 0x61, offset 0x1840 - 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, - 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, - 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 
0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, - 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, - 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, - 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, - 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, - 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, - 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, - 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, - 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 
0x19ee: 0x2000, 0x19ef: 0x2000, - 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, - 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, - 0x19fc: 0x2000, 0x19fd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 
0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 
0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, - 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. 
-// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - 
{0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15320 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go deleted file mode 100644 index b1fcb522c..000000000 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "13.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 105 blocks, 6720 entries, 13440 bytes -// The third block is the zero block. 
-var widthValues = [6720]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, - 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, - // Block 0x3e, offset 0xf80 - 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, - 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, - 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, - 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, - 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, - 0xfbc: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, - 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, - 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, - 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, - 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, - 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, - 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, - 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, - 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, - 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, - 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, - 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, - 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, - 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, - 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, - 0x106a: 0x4000, 0x106b: 0x4000, - // Block 0x42, offset 0x1080 - 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, - 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, - 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, - 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, - 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, - 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, - 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, - 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, - 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, - 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, - 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, - 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, - 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, - 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, - 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, - 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, - 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, - 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, - 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, - 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, - 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, - // Block 0x44, offset 0x1100 - 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, - 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, - 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, - 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, - 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, - 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, - 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, - 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, - 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, - 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, - 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, - // Block 0x45, offset 0x1140 - 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, - 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, - 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, - 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, - 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, - 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, - 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, - 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, - 0x117d: 0x2000, - // Block 0x46, offset 0x1180 - 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, - 0x11a4: 0x4000, - 0x11b0: 0x4000, 0x11b1: 0x4000, - // Block 0x47, offset 0x11c0 - 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, - 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, - 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, - 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, - 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, - 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, - 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, - 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, - 0x11f6: 0x4000, 0x11f7: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, - // Block 0x4a, offset 0x1280 
- 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bf: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, - 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, - 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, - 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, - 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, - // Block 0x5c, offset 0x1700 - 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, - 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, - 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, - 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, - 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, - 0x173a: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1755: 0x4000, 0x1756: 0x4000, - 0x1764: 0x4000, - // Block 
0x5e, offset 0x1780 - 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x182b: 0x4000, 0x182c: 0x4000, - 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, - // Block 0x61, offset 0x1840 - 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, - 0x1986: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, - 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, - 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, - 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, - 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, - 0x19b6: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, - 0x19d0: 0x4000, 0x19d1: 0x4000, - 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, - 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, - 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, - 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, - 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, - 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, - 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, - 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, - 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, - 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, - 0x1a3c: 0x2000, 0x1a3d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, - 0x265: 0x3c, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, - // Block 0xd, offset 0x340 - 0x37f: 0x44, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, - 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, - 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, - 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables15.0.0.go b/vendor/golang.org/x/text/width/tables15.0.0.go deleted file mode 100644 index 4b91e3384..000000000 --- a/vendor/golang.org/x/text/width/tables15.0.0.go +++ /dev/null @@ -1,1368 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.21 -// +build go1.21 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "15.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14912 bytes (14.56 KiB). Checksum: 4468b6cd178303d2. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 105 blocks, 6720 entries, 13440 bytes -// The third block is the zero block. 
-var widthValues = [6720]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, - 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, - // Block 0x3e, offset 0xf80 - 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, - 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, - 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, - 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, - 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, - 0xfbc: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, - 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, - 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, - 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, - 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, - 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, - 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, - 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, - 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, - 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, - 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, - 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, - 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, - 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, - 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, - 0x106a: 0x4000, 0x106b: 0x4000, - // Block 0x42, offset 0x1080 - 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, - 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, - 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, - 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, - 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, - 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, - 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, - 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, - 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, - 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, - 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, - 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, - 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, - 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, - 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, - 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, - 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, - 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, - 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, - 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, - 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, - // Block 0x44, offset 0x1100 - 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, - 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, - 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, - 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, - 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, - 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, - 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, - 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, - 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, - 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, - 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, - // Block 0x45, offset 0x1140 - 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, - 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, - 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, - 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, - 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, - 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, - 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, - 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, - 0x117d: 0x2000, - // Block 0x46, offset 0x1180 - 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, - 0x11a4: 0x4000, - 0x11b0: 0x4000, 0x11b1: 0x4000, - // Block 0x47, offset 0x11c0 - 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, - 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, - 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, - 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, - 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, - 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, - 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, - 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, - 0x11f6: 0x4000, 0x11f7: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, - // Block 0x4a, offset 0x1280 
- 0x12b0: 0x4000, 0x12b1: 0x4000, 0x12b2: 0x4000, 0x12b3: 0x4000, 0x12b5: 0x4000, - 0x12b6: 0x4000, 0x12b7: 0x4000, 0x12b8: 0x4000, 0x12b9: 0x4000, 0x12ba: 0x4000, 0x12bb: 0x4000, - 0x12bd: 0x4000, 0x12be: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c0: 0x4000, 0x12c1: 0x4000, 0x12c2: 0x4000, 0x12c3: 0x4000, 0x12c4: 0x4000, 0x12c5: 0x4000, - 0x12c6: 0x4000, 0x12c7: 0x4000, 0x12c8: 0x4000, 0x12c9: 0x4000, 0x12ca: 0x4000, 0x12cb: 0x4000, - 0x12cc: 0x4000, 0x12cd: 0x4000, 0x12ce: 0x4000, 0x12cf: 0x4000, 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, 0x12d3: 0x4000, 0x12d4: 0x4000, 0x12d5: 0x4000, 0x12d6: 0x4000, 0x12d7: 0x4000, - 0x12d8: 0x4000, 0x12d9: 0x4000, 0x12da: 0x4000, 0x12db: 0x4000, 0x12dc: 0x4000, 0x12dd: 0x4000, - 0x12de: 0x4000, 0x12df: 0x4000, 0x12e0: 0x4000, 0x12e1: 0x4000, 0x12e2: 0x4000, - 0x12f2: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1315: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - 0x133c: 0x4000, 0x133d: 0x4000, 0x133e: 0x4000, 0x133f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x4000, 0x1341: 0x4000, 0x1342: 0x4000, 0x1343: 0x4000, 0x1344: 0x4000, 0x1345: 0x4000, - 0x1346: 0x4000, 0x1347: 0x4000, 0x1348: 0x4000, 0x1349: 0x4000, 0x134a: 0x4000, 0x134b: 0x4000, - 0x134c: 0x4000, 0x134d: 0x4000, 0x134e: 0x4000, 0x134f: 0x4000, 0x1350: 0x4000, 0x1351: 0x4000, - 0x1352: 0x4000, 0x1353: 0x4000, 0x1354: 0x4000, 0x1355: 0x4000, 0x1356: 0x4000, 0x1357: 0x4000, - 0x1358: 0x4000, 0x1359: 0x4000, 0x135a: 0x4000, 0x135b: 0x4000, 0x135c: 0x4000, 0x135d: 0x4000, - 0x135e: 0x4000, 0x135f: 0x4000, 0x1360: 0x4000, 0x1361: 0x4000, 0x1362: 0x4000, 0x1363: 0x4000, - 0x1364: 0x4000, 0x1365: 0x4000, 0x1366: 0x4000, 0x1367: 0x4000, 0x1368: 0x4000, 0x1369: 0x4000, - 0x136a: 0x4000, 0x136b: 0x4000, 0x136c: 0x4000, 0x136d: 0x4000, 0x136e: 0x4000, 0x136f: 0x4000, - 0x1370: 0x4000, 0x1371: 0x4000, 0x1372: 0x4000, 0x1373: 0x4000, 0x1374: 0x4000, 0x1375: 0x4000, - 0x1376: 0x4000, 0x1377: 0x4000, 0x1378: 0x4000, 0x1379: 0x4000, 0x137a: 0x4000, 0x137b: 0x4000, - // Block 0x4e, offset 0x1380 - 0x1384: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13cf: 0x4000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, - 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x142a: 0x2000, 0x142b: 0x2000, 0x142c: 0x2000, 0x142d: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 
0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x2000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x2000, - 0x1452: 0x2000, 0x1453: 0x2000, 0x1454: 0x2000, 0x1455: 0x2000, 0x1456: 0x2000, 0x1457: 0x2000, - 0x1458: 0x2000, 0x1459: 0x2000, 0x145a: 0x2000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x1470: 0x2000, 0x1471: 0x2000, 0x1472: 0x2000, 0x1473: 0x2000, 0x1474: 0x2000, 0x1475: 0x2000, - 0x1476: 0x2000, 0x1477: 0x2000, 0x1478: 0x2000, 0x1479: 0x2000, 0x147a: 0x2000, 0x147b: 0x2000, - 0x147c: 0x2000, 0x147d: 0x2000, 0x147e: 0x2000, 0x147f: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x2000, 0x1481: 0x2000, 0x1482: 0x2000, 0x1483: 0x2000, 0x1484: 0x2000, 0x1485: 0x2000, - 0x1486: 0x2000, 0x1487: 0x2000, 0x1488: 0x2000, 0x1489: 0x2000, 0x148a: 0x2000, 0x148b: 0x2000, - 0x148c: 0x2000, 0x148d: 0x2000, 0x148e: 0x4000, 0x148f: 0x2000, 0x1490: 0x2000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x2000, 0x149c: 0x2000, 0x149d: 0x2000, - 0x149e: 0x2000, 0x149f: 0x2000, 0x14a0: 0x2000, 0x14a1: 0x2000, 0x14a2: 0x2000, 0x14a3: 0x2000, - 0x14a4: 0x2000, 0x14a5: 0x2000, 0x14a6: 0x2000, 0x14a7: 0x2000, 0x14a8: 0x2000, 0x14a9: 0x2000, - 0x14aa: 0x2000, 0x14ab: 0x2000, 0x14ac: 0x2000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, - 0x1510: 0x4000, 0x1511: 0x4000, - 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, - 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 
0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, - 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, - 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1634: 0x4000, - 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 
0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, 0x16be: 0x4000, 0x16bf: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, - 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, - 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, - 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, - 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, - 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, - 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, - 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, - 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, 0x1728: 0x4000, 0x1729: 0x4000, - 0x172a: 0x4000, 0x172b: 0x4000, 0x172c: 0x4000, 0x172d: 0x4000, 0x172e: 0x4000, 0x172f: 0x4000, - 0x1730: 0x4000, 0x1731: 0x4000, 0x1732: 0x4000, 0x1733: 0x4000, 0x1734: 0x4000, 0x1735: 0x4000, - 0x1736: 0x4000, 0x1737: 0x4000, 0x1738: 0x4000, 0x1739: 0x4000, 0x173a: 0x4000, 0x173b: 0x4000, - 0x173c: 0x4000, 0x173d: 0x4000, - // Block 0x5d, offset 0x1740 - 0x174b: 0x4000, - 0x174c: 0x4000, 0x174d: 0x4000, 0x174e: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, 0x1753: 0x4000, 0x1754: 0x4000, 0x1755: 0x4000, 0x1756: 0x4000, 0x1757: 0x4000, - 0x1758: 0x4000, 0x1759: 0x4000, 
0x175a: 0x4000, 0x175b: 0x4000, 0x175c: 0x4000, 0x175d: 0x4000, - 0x175e: 0x4000, 0x175f: 0x4000, 0x1760: 0x4000, 0x1761: 0x4000, 0x1762: 0x4000, 0x1763: 0x4000, - 0x1764: 0x4000, 0x1765: 0x4000, 0x1766: 0x4000, 0x1767: 0x4000, - 0x177a: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1795: 0x4000, 0x1796: 0x4000, - 0x17a4: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17fb: 0x4000, - 0x17fc: 0x4000, 0x17fd: 0x4000, 0x17fe: 0x4000, 0x17ff: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, - 0x186b: 0x4000, 0x186c: 0x4000, - 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, - 0x187c: 0x4000, - // Block 0x62, offset 0x1880 - 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, - 0x18b0: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, - 0x190c: 0x4000, 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, 0x1975: 0x4000, - 0x1976: 0x4000, 
0x1977: 0x4000, 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, 0x197b: 0x4000, - 0x197c: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, - 0x1986: 0x4000, 0x1987: 0x4000, 0x1988: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, - 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, - 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, - 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, 0x19a9: 0x4000, - 0x19aa: 0x4000, 0x19ab: 0x4000, 0x19ac: 0x4000, 0x19ad: 0x4000, 0x19ae: 0x4000, 0x19af: 0x4000, - 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, - 0x19b6: 0x4000, 0x19b7: 0x4000, 0x19b8: 0x4000, 0x19b9: 0x4000, 0x19ba: 0x4000, 0x19bb: 0x4000, - 0x19bc: 0x4000, 0x19bd: 0x4000, 0x19bf: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, 0x19c3: 0x4000, 0x19c4: 0x4000, 0x19c5: 0x4000, - 0x19ce: 0x4000, 0x19cf: 0x4000, 0x19d0: 0x4000, 0x19d1: 0x4000, - 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, 0x19d7: 0x4000, - 0x19d8: 0x4000, 0x19d9: 0x4000, 0x19da: 0x4000, 0x19db: 0x4000, - 0x19e0: 0x4000, 0x19e1: 0x4000, 0x19e2: 0x4000, 0x19e3: 0x4000, - 0x19e4: 0x4000, 0x19e5: 0x4000, 0x19e6: 0x4000, 0x19e7: 0x4000, 0x19e8: 0x4000, - 0x19f0: 0x4000, 0x19f1: 0x4000, 0x19f2: 0x4000, 0x19f3: 0x4000, 0x19f4: 0x4000, 0x19f5: 0x4000, - 0x19f6: 0x4000, 0x19f7: 0x4000, 0x19f8: 0x4000, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, - 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, - 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, - 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, - 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, - 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, - 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, - 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, - 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, - 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, - 0x1a3c: 0x2000, 0x1a3d: 0x2000, -} - -// widthIndex: 23 blocks, 1472 entries, 1472 bytes -// Block 0 is the zero block. 
-var widthIndex = [1472]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x10, 0xf3: 0x13, 0xf4: 0x14, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, - 0x265: 0x3c, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, - // Block 0xd, offset 0x340 - 0x37f: 0x44, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, - 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, - // Block 0xf, offset 0x3c0 - 0x3ff: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x0e, 
0x401: 0x0e, 0x402: 0x0e, 0x403: 0x0e, 0x404: 0x49, 0x405: 0x4a, 0x406: 0x0e, 0x407: 0x0e, - 0x408: 0x0e, 0x409: 0x0e, 0x40a: 0x0e, 0x40b: 0x4b, - // Block 0x11, offset 0x440 - 0x440: 0x4c, 0x443: 0x4d, 0x444: 0x4e, 0x445: 0x4f, 0x446: 0x50, - 0x448: 0x51, 0x449: 0x52, 0x44c: 0x53, 0x44d: 0x54, 0x44e: 0x55, 0x44f: 0x56, - 0x450: 0x57, 0x451: 0x58, 0x452: 0x0e, 0x453: 0x59, 0x454: 0x5a, 0x455: 0x5b, 0x456: 0x5c, 0x457: 0x5d, - 0x458: 0x0e, 0x459: 0x5e, 0x45a: 0x0e, 0x45b: 0x5f, 0x45f: 0x60, - 0x464: 0x61, 0x465: 0x62, 0x466: 0x0e, 0x467: 0x0e, - 0x469: 0x63, 0x46a: 0x64, 0x46b: 0x65, - // Block 0x12, offset 0x480 - 0x496: 0x0b, 0x497: 0x06, - 0x498: 0x0c, 0x49a: 0x0d, 0x49b: 0x0e, 0x49f: 0x0f, - 0x4a0: 0x06, 0x4a1: 0x06, 0x4a2: 0x06, 0x4a3: 0x06, 0x4a4: 0x06, 0x4a5: 0x06, 0x4a6: 0x06, 0x4a7: 0x06, - 0x4a8: 0x06, 0x4a9: 0x06, 0x4aa: 0x06, 0x4ab: 0x06, 0x4ac: 0x06, 0x4ad: 0x06, 0x4ae: 0x06, 0x4af: 0x06, - 0x4b0: 0x06, 0x4b1: 0x06, 0x4b2: 0x06, 0x4b3: 0x06, 0x4b4: 0x06, 0x4b5: 0x06, 0x4b6: 0x06, 0x4b7: 0x06, - 0x4b8: 0x06, 0x4b9: 0x06, 0x4ba: 0x06, 0x4bb: 0x06, 0x4bc: 0x06, 0x4bd: 0x06, 0x4be: 0x06, 0x4bf: 0x06, - // Block 0x13, offset 0x4c0 - 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x09, - // Block 0x14, offset 0x500 - 0x500: 0x08, 0x501: 0x08, 0x502: 0x08, 0x503: 0x08, 0x504: 0x08, 0x505: 0x08, 0x506: 0x08, 0x507: 0x08, - 0x508: 0x08, 0x509: 0x08, 0x50a: 0x08, 0x50b: 0x08, 0x50c: 0x08, 0x50d: 0x08, 0x50e: 0x08, 0x50f: 0x08, - 0x510: 0x08, 0x511: 0x08, 0x512: 0x08, 0x513: 0x08, 0x514: 0x08, 0x515: 0x08, 0x516: 0x08, 0x517: 0x08, - 0x518: 0x08, 0x519: 0x08, 0x51a: 0x08, 0x51b: 0x08, 0x51c: 0x08, 0x51d: 0x08, 0x51e: 0x08, 0x51f: 0x08, - 0x520: 0x08, 0x521: 0x08, 0x522: 0x08, 0x523: 0x08, 0x524: 0x08, 0x525: 0x08, 0x526: 0x08, 0x527: 0x08, - 0x528: 0x08, 0x529: 0x08, 0x52a: 0x08, 0x52b: 0x08, 0x52c: 0x08, 0x52d: 0x08, 0x52e: 0x08, 0x52f: 0x08, - 0x530: 0x08, 0x531: 0x08, 0x532: 0x08, 0x533: 0x08, 0x534: 0x08, 0x535: 0x08, 0x536: 0x08, 0x537: 0x08, - 0x538: 0x08, 0x539: 0x08, 0x53a: 0x08, 0x53b: 0x08, 0x53c: 0x08, 0x53d: 0x08, 0x53e: 0x08, 0x53f: 0x66, - // Block 0x15, offset 0x540 - 0x560: 0x11, - 0x570: 0x09, 0x571: 0x09, 0x572: 0x09, 0x573: 0x09, 0x574: 0x09, 0x575: 0x09, 0x576: 0x09, 0x577: 0x09, - 0x578: 0x09, 0x579: 0x09, 0x57a: 0x09, 0x57b: 0x09, 0x57c: 0x09, 0x57d: 0x09, 0x57e: 0x09, 0x57f: 0x12, - // Block 0x16, offset 0x580 - 0x580: 0x09, 0x581: 0x09, 0x582: 0x09, 0x583: 0x09, 0x584: 0x09, 0x585: 0x09, 0x586: 0x09, 0x587: 0x09, - 0x588: 0x09, 0x589: 0x09, 0x58a: 0x09, 0x58b: 0x09, 0x58c: 0x09, 0x58d: 0x09, 0x58e: 0x09, 0x58f: 0x12, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15512 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go deleted file mode 100644 index 6781f3d96..000000000 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ /dev/null @@ -1,1297 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build !go1.10 -// +build !go1.10 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 99 blocks, 6336 entries, 12672 bytes -// The third block is the zero block. 
-var widthValues = [6336]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, - 0xdb1: 0x403e, 0xdb2: 
0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 
0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 
0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 
0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 
0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c4: 0x4000, - // Block 0x4c, offset 0x1300 - 0x130f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, - 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, - 0x1350: 0x2000, 0x1351: 0x2000, - 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, - 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, - 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, - 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, - 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, - 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, - 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, - 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, - // Block 0x4e, offset 0x1380 - 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, - 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, - 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, - 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, - 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, - 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, - 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, - 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, - 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, - 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, - 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, - 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, - 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 
0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, - 0x1410: 0x4000, 0x1411: 0x4000, - 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, - 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, - 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, - 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, - 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, - 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, - 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, - // Block 0x51, offset 0x1440 - 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, - 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, - 0x1450: 0x4000, 0x1451: 0x4000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, - 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, - 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, - 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, - 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, - 0x1520: 0x4000, 0x1521: 
0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, - 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, - 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, - 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1574: 0x4000, - 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, - 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, - 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, 
- 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, - // Block 0x59, offset 0x1640 - 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, - 0x167a: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1695: 0x4000, 0x1696: 0x4000, - 0x16a4: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, - 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, - 0x176b: 0x4000, 0x176c: 0x4000, - 0x1774: 0x4000, 0x1775: 0x4000, - 0x1776: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1790: 0x4000, 0x1791: 0x4000, - 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, - 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, - 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, - 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, - 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, - 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, - 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, - 0x17de: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - // Block 0x61, offset 0x1840 
- 0x1840: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, - 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, - 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, - 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, - 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, - 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, - 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, - 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, - 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, - 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, - 0x18bc: 0x2000, 0x18bd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 
0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 
0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, - 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, - 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, - 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, - 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 
encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 
0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go deleted file mode 100644 index 0049f700a..000000000 --- a/vendor/golang.org/x/text/width/transform.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package width - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -type foldTransform struct { - transform.NopResetter -} - -func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if elem(v)&tagNeedsFold != 0 { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. 
- start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if elem(v)&tagNeedsFold == 0 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type narrowTransform struct { - transform.NopResetter -} - -func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. - start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type wideTransform struct { - transform.NopResetter -} - -func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. 
- v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go deleted file mode 100644 index ca8e45fd1..000000000 --- a/vendor/golang.org/x/text/width/trieval.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package width - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go deleted file mode 100644 index 29c7509be..000000000 --- a/vendor/golang.org/x/text/width/width.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate stringer -type=Kind -//go:generate go run gen.go gen_common.go gen_trieval.go - -// Package width provides functionality for handling different widths in text. -// -// Wide characters behave like ideographs; they tend to allow line breaks after -// each character and remain upright in vertical text layout. Narrow characters -// are kept together in words or runs that are rotated sideways in vertical text -// layout. -// -// For more information, see https://unicode.org/reports/tr11/. -package width // import "golang.org/x/text/width" - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// TODO -// 1) Reduce table size by compressing blocks. -// 2) API proposition for computing display length -// (approximation, fixed pitch only). -// 3) Implement display length. 
- -// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. -type Kind int - -const ( - // Neutral characters do not occur in legacy East Asian character sets. - Neutral Kind = iota - - // EastAsianAmbiguous characters that can be sometimes wide and sometimes - // narrow and require additional information not contained in the character - // code to further resolve their width. - EastAsianAmbiguous - - // EastAsianWide characters are wide in its usual form. They occur only in - // the context of East Asian typography. These runes may have explicit - // halfwidth counterparts. - EastAsianWide - - // EastAsianNarrow characters are narrow in its usual form. They often have - // fullwidth counterparts. - EastAsianNarrow - - // Note: there exist Narrow runes that do not have fullwidth or wide - // counterparts, despite what the definition says (e.g. U+27E6). - - // EastAsianFullwidth characters have a compatibility decompositions of type - // wide that map to a narrow counterpart. - EastAsianFullwidth - - // EastAsianHalfwidth characters have a compatibility decomposition of type - // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON - // SIGN. - EastAsianHalfwidth - - // Note: there exist runes that have a halfwidth counterparts but that are - // classified as Ambiguous, rather than wide (e.g. U+2190). -) - -// TODO: the generated tries need to return size 1 for invalid runes for the -// width to be computed correctly (each byte should render width 1) - -var trie = newWidthTrie(0) - -// Lookup reports the Properties of the first rune in b and the number of bytes -// of its UTF-8 encoding. -func Lookup(b []byte) (p Properties, size int) { - v, sz := trie.lookup(b) - return Properties{elem(v), b[sz-1]}, sz -} - -// LookupString reports the Properties of the first rune in s and the number of -// bytes of its UTF-8 encoding. -func LookupString(s string) (p Properties, size int) { - v, sz := trie.lookupString(s) - return Properties{elem(v), s[sz-1]}, sz -} - -// LookupRune reports the Properties of rune r. -func LookupRune(r rune) Properties { - var buf [4]byte - n := utf8.EncodeRune(buf[:], r) - v, _ := trie.lookup(buf[:n]) - last := byte(r) - if r >= utf8.RuneSelf { - last = 0x80 + byte(r&0x3f) - } - return Properties{elem(v), last} -} - -// Properties provides access to width properties of a rune. -type Properties struct { - elem elem - last byte -} - -func (e elem) kind() Kind { - return Kind(e >> typeShift) -} - -// Kind returns the Kind of a rune as defined in Unicode TR #11. -// See https://unicode.org/reports/tr11/ for more details. -func (p Properties) Kind() Kind { - return p.elem.kind() -} - -// Folded returns the folded variant of a rune or 0 if the rune is canonical. -func (p Properties) Folded() rune { - if p.elem&tagNeedsFold != 0 { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Narrow returns the narrow variant of a rune or 0 if the rune is already -// narrow or doesn't have a narrow variant. -func (p Properties) Narrow() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Wide returns the wide variant of a rune or 0 if the rune is already -// wide or doesn't have a wide variant. 
-func (p Properties) Wide() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// TODO for Properties: -// - Add Fullwidth/Halfwidth or Inverted methods for computing variants -// mapping. -// - Add width information (including information on non-spacing runes). - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.SpanningTransformer -} - -// Reset implements the transform.Transformer interface. -func (t Transformer) Reset() { t.t.Reset() } - -// Transform implements the transform.Transformer interface. -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -// Span implements the transform.SpanningTransformer interface. -func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { - return t.t.Span(src, atEOF) -} - -// Bytes returns a new byte slice with the result of applying t to b. -func (t Transformer) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(t, b) - return b -} - -// String returns a string with the result of applying t to s. -func (t Transformer) String(s string) string { - s, _, _ = transform.String(t, s) - return s -} - -var ( - // Fold is a transform that maps all runes to their canonical width. - // - // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm - // provide a more generic folding mechanism. - Fold Transformer = Transformer{foldTransform{}} - - // Widen is a transform that maps runes to their wide variant, if - // available. - Widen Transformer = Transformer{wideTransform{}} - - // Narrow is a transform that maps runes to their narrow variant, if - // available. - Narrow Transformer = Transformer{narrowTransform{}} -) - -// TODO: Consider the following options: -// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some -// generalized variant of this. -// - Consider a wide Won character to be the default width (or some generalized -// variant of this). -// - Filter the set of characters that gets converted (the preferred approach is -// to allow applying filters to transforms). diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c..f0e0cf3cb 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. 
// Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. 
-func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
-func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -351,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -369,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. 
+// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. +func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 6ac9e80ff..9a2d0bccd 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -44,10 +44,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_aaac5994f79683e8, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +103,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_aaac5994f79683e8, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +131,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + 
return fileDescriptor_aaac5994f79683e8, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +159,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_aaac5994f79683e8, []int{4} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +187,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_aaac5994f79683e8, []int{5} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +215,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_aaac5994f79683e8, []int{6} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +243,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_aaac5994f79683e8, []int{7} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +271,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_aaac5994f79683e8, []int{8} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +299,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_aaac5994f79683e8, []int{9} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +327,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} + return fileDescriptor_aaac5994f79683e8, []int{10} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,6 +353,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), 
"k8s.io.api.admissionregistration.v1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") @@ -342,79 +371,116 @@ func init() { } var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1105 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0xae, 0x84, - 0x8c, 0x80, 0xdd, 0x26, 0x94, 0x52, 0x71, 0x41, 0xd9, 0xf0, 0x47, 0x11, 0x49, 0x1b, 0x4d, 0xda, - 0x14, 0xa1, 0x1c, 0x3a, 0x5e, 0x8f, 0xed, 0x21, 0xf6, 0xce, 0x6a, 0x66, 0xd6, 0x90, 0x1b, 0x1f, - 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, - 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xe6, 0xf9, - 0xbd, 0x79, 0xbf, 0x37, 0xef, 0xed, 0x7b, 0xef, 0x27, 0x83, 0x9d, 0xa3, 0xc7, 0xc2, 0xa2, 0xcc, - 0x3e, 0x0a, 0x9a, 0x84, 0x7b, 0x44, 0x12, 0x61, 0x0f, 0x88, 0xd7, 0x62, 0xdc, 0x4e, 0x0c, 0xd8, - 0xa7, 0x36, 0x6e, 0xf5, 0xa9, 0x10, 0x94, 0x79, 0x9c, 0x74, 0xa8, 0x90, 0x1c, 0x4b, 0xca, 0x3c, - 0x7b, 0xb0, 0x6e, 0x77, 0x88, 0x47, 0x38, 0x96, 0xa4, 0x65, 0xf9, 0x9c, 0x49, 0x06, 0xef, 0xc7, - 0x4e, 0x16, 0xf6, 0xa9, 0x75, 0xa1, 0x93, 0x35, 0x58, 0x5f, 0xfb, 0xb0, 0x43, 0x65, 0x37, 0x68, - 0x5a, 0x2e, 0xeb, 0xdb, 0x1d, 0xd6, 0x61, 0xb6, 0xf6, 0x6d, 0x06, 0x6d, 0x7d, 0xd2, 0x07, 0xfd, - 0x2b, 0xe6, 0x5c, 0x7b, 0x98, 0x3d, 0xa4, 0x8f, 0xdd, 0x2e, 0xf5, 0x08, 0x3f, 0xb6, 0xfd, 0xa3, - 0x8e, 0x02, 0x84, 0xdd, 0x27, 0x12, 0x5f, 0xf0, 0x92, 0x35, 0xfb, 0x32, 0x2f, 0x1e, 0x78, 0x92, - 0xf6, 0xc9, 0x94, 0xc3, 0xa3, 0xff, 0x73, 0x10, 0x6e, 0x97, 0xf4, 0xf1, 0xa4, 0x5f, 0xfd, 0xb7, - 0x05, 0xb0, 0xb2, 0x1b, 0x48, 0x2c, 0xa9, 0xd7, 0x79, 0x41, 0x9a, 0x5d, 0xc6, 0x8e, 0x60, 0x0d, - 0xe4, 0x3d, 0xdc, 0x27, 0x15, 0xa3, 0x66, 0x34, 0x4a, 0xce, 0xe2, 0x49, 0x68, 0xce, 0x45, 0xa1, - 0x99, 0x7f, 0x82, 0xfb, 0x04, 0x69, 0x0b, 0xe4, 0x60, 0xd1, 0xed, 0x51, 0xe2, 0xc9, 0x2d, 0xe6, - 0xb5, 0x69, 0xa7, 0x32, 0x5f, 0x33, 0x1a, 0xe5, 0x8d, 0xc7, 0xd6, 0x0c, 0xf5, 0xb3, 0x92, 0x28, - 0x5b, 0x23, 0xfe, 0xce, 0x9b, 0x49, 0x8c, 0xc5, 0x51, 0x14, 0x8d, 0xc5, 0x80, 0x87, 0xa0, 0xc0, - 0x83, 0x1e, 0x11, 0x95, 0x5c, 0x2d, 0xd7, 0x28, 0x6f, 0x7c, 0x32, 0x53, 0x30, 0x14, 0xf4, 0xc8, - 0x0b, 0x2a, 0xbb, 0x4f, 0x7d, 0x12, 0x83, 0xc2, 0x59, 0x4a, 0x62, 0x15, 0x94, 0x4d, 0xa0, 0x98, - 0x14, 0xee, 0x80, 0xa5, 0x36, 0xa6, 0xbd, 0x80, 0x93, 0x3d, 0xd6, 0xa3, 0xee, 0x71, 0x25, 0xaf, - 0x93, 0x7f, 0x37, 0x0a, 0xcd, 0xa5, 0x2f, 0x47, 0x0d, 0xe7, 0xa1, 0xb9, 0x3a, 0x06, 0x3c, 0x3b, - 0xf6, 0x09, 0x1a, 0x77, 0x86, 0x9f, 0x83, 0x72, 0x1f, 0x4b, 0xb7, 0x9b, 0x70, 0x95, 0x34, 0x57, - 0x3d, 0x0a, 0xcd, 0xf2, 0x6e, 0x06, 0x9f, 0x87, 0xe6, 0xca, 0xc8, 0x51, 0xf3, 0x8c, 0xba, 0xc1, - 0x1f, 0xc0, 0xaa, 0xaa, 0xb6, 0xf0, 0xb1, 0x4b, 0xf6, 0x49, 0x8f, 0xb8, 0x92, 0xf1, 0x4a, 0x41, - 0x97, 0xfa, 0xa3, 0x91, 0xec, 0xd3, 0xef, 0x6d, 0xf9, 0x47, 0x1d, 0x05, 0x08, 0x4b, 0xb5, 0x95, - 0x4a, 0x7f, 0x07, 0x37, 0x49, 0x6f, 0xe8, 0xea, 0xbc, 0x15, 0x85, 0xe6, 0xea, 0x93, 0x49, 0x46, - 0x34, 0x1d, 0x04, 0x32, 0xb0, 0xcc, 0x9a, 0xdf, 0x11, 0x57, 0xa6, 0x61, 0xcb, 0xd7, 0x0f, 0x0b, - 0xa3, 0xd0, 0x5c, 0x7e, 0x3a, 0x46, 0x87, 0x26, 0xe8, 0x55, 0xc1, 
0x04, 0x6d, 0x91, 0x2f, 0xda, - 0x6d, 0xe2, 0x4a, 0x51, 0xb9, 0x95, 0x15, 0x6c, 0x3f, 0x83, 0x55, 0xc1, 0xb2, 0xe3, 0x56, 0x0f, - 0x0b, 0x81, 0x46, 0xdd, 0xe0, 0xa7, 0x60, 0x59, 0xf5, 0x3a, 0x0b, 0xe4, 0x3e, 0x71, 0x99, 0xd7, - 0x12, 0x95, 0x85, 0x9a, 0xd1, 0x28, 0xc4, 0x2f, 0x78, 0x36, 0x66, 0x41, 0x13, 0x37, 0xe1, 0x73, - 0x70, 0x37, 0xed, 0x22, 0x44, 0x06, 0x94, 0x7c, 0x7f, 0x40, 0xb8, 0x3a, 0x88, 0x4a, 0xb1, 0x96, - 0x6b, 0x94, 0x9c, 0x77, 0xa2, 0xd0, 0xbc, 0xbb, 0x79, 0xf1, 0x15, 0x74, 0x99, 0x2f, 0x7c, 0x09, - 0x20, 0x27, 0xd4, 0x1b, 0x30, 0x57, 0xb7, 0x5f, 0xd2, 0x10, 0x40, 0xe7, 0xf7, 0x20, 0x0a, 0x4d, - 0x88, 0xa6, 0xac, 0xe7, 0xa1, 0x79, 0x67, 0x1a, 0xd5, 0xed, 0x71, 0x01, 0x57, 0xfd, 0xd4, 0x00, - 0xf7, 0x26, 0x26, 0x38, 0x9e, 0x98, 0x20, 0xee, 0x78, 0xf8, 0x12, 0x14, 0xd5, 0x87, 0x69, 0x61, - 0x89, 0xf5, 0x48, 0x97, 0x37, 0x1e, 0xcc, 0xf6, 0x19, 0xe3, 0x6f, 0xb6, 0x4b, 0x24, 0x76, 0x60, - 0x32, 0x34, 0x20, 0xc3, 0x50, 0xca, 0x0a, 0x0f, 0x40, 0x31, 0x89, 0x2c, 0x2a, 0xf3, 0x7a, 0x3a, - 0x1f, 0xce, 0x34, 0x9d, 0x13, 0xcf, 0x76, 0xf2, 0x2a, 0x0a, 0x4a, 0xb9, 0xea, 0xff, 0x18, 0xa0, - 0x76, 0x55, 0x6a, 0x3b, 0x54, 0x48, 0x78, 0x38, 0x95, 0x9e, 0x35, 0x63, 0x97, 0x52, 0x11, 0x27, - 0x77, 0x3b, 0x49, 0xae, 0x38, 0x44, 0x46, 0x52, 0x6b, 0x83, 0x02, 0x95, 0xa4, 0x3f, 0xcc, 0x6b, - 0xf3, 0x3a, 0x79, 0x8d, 0xbd, 0x39, 0xdb, 0x3f, 0xdb, 0x8a, 0x17, 0xc5, 0xf4, 0xf5, 0xdf, 0x0d, - 0x90, 0x57, 0x0b, 0x09, 0xbe, 0x0f, 0x4a, 0xd8, 0xa7, 0x5f, 0x71, 0x16, 0xf8, 0xa2, 0x62, 0xe8, - 0xce, 0x5b, 0x8a, 0x42, 0xb3, 0xb4, 0xb9, 0xb7, 0x1d, 0x83, 0x28, 0xb3, 0xc3, 0x75, 0x50, 0xc6, - 0x3e, 0x4d, 0x1b, 0x75, 0x5e, 0x5f, 0x5f, 0x51, 0x63, 0xb3, 0xb9, 0xb7, 0x9d, 0x36, 0xe7, 0xe8, - 0x1d, 0xc5, 0xcf, 0x89, 0x60, 0x01, 0x77, 0x93, 0x55, 0x9a, 0xf0, 0xa3, 0x21, 0x88, 0x32, 0x3b, - 0xfc, 0x00, 0x14, 0x84, 0xcb, 0x7c, 0x92, 0x6c, 0xc3, 0x3b, 0xea, 0xd9, 0xfb, 0x0a, 0x38, 0x0f, - 0xcd, 0x92, 0xfe, 0xa1, 0xdb, 0x32, 0xbe, 0x54, 0xff, 0xc5, 0x00, 0x70, 0x7a, 0xe1, 0xc2, 0xcf, - 0x00, 0x60, 0xe9, 0x29, 0x49, 0xc9, 0xd4, 0xbd, 0x94, 0xa2, 0xe7, 0xa1, 0xb9, 0x94, 0x9e, 0x34, - 0xe5, 0x88, 0x0b, 0xfc, 0x1a, 0xe4, 0xd5, 0x92, 0x4e, 0x54, 0xe6, 0xbd, 0x99, 0x17, 0x7f, 0x26, - 0x5d, 0xea, 0x84, 0x34, 0x49, 0xfd, 0x67, 0x03, 0xdc, 0xde, 0x27, 0x7c, 0x40, 0x5d, 0x82, 0x48, - 0x9b, 0x70, 0xe2, 0xb9, 0x04, 0xda, 0xa0, 0x94, 0x2e, 0xc1, 0x44, 0xf6, 0x56, 0x13, 0xdf, 0x52, - 0xba, 0x30, 0x51, 0x76, 0x27, 0x95, 0xc8, 0xf9, 0x4b, 0x25, 0xf2, 0x1e, 0xc8, 0xfb, 0x58, 0x76, - 0x2b, 0x39, 0x7d, 0xa3, 0xa8, 0xac, 0x7b, 0x58, 0x76, 0x91, 0x46, 0xb5, 0x95, 0x71, 0xa9, 0xeb, - 0x5a, 0x48, 0xac, 0x8c, 0x4b, 0xa4, 0xd1, 0xfa, 0x9f, 0xb7, 0xc0, 0xea, 0x01, 0xee, 0xd1, 0xd6, - 0x8d, 0x2c, 0xdf, 0xc8, 0xf2, 0x95, 0xb2, 0x0c, 0x6e, 0x64, 0xf9, 0x3a, 0xb2, 0x5c, 0xff, 0xc3, - 0x00, 0xd5, 0xa9, 0x09, 0x7b, 0xdd, 0xb2, 0xf9, 0xcd, 0x94, 0x6c, 0x3e, 0x9a, 0x69, 0x7a, 0xa6, - 0x1e, 0x3e, 0x25, 0x9c, 0xff, 0x1a, 0xa0, 0x7e, 0x75, 0x7a, 0xaf, 0x41, 0x3a, 0xbb, 0xe3, 0xd2, - 0xb9, 0x75, 0xbd, 0xdc, 0x66, 0x11, 0xcf, 0x5f, 0x0d, 0xf0, 0xc6, 0x05, 0xfb, 0x0b, 0xbe, 0x0d, - 0x72, 0x01, 0xef, 0x25, 0x2b, 0x78, 0x21, 0x0a, 0xcd, 0xdc, 0x73, 0xb4, 0x83, 0x14, 0x06, 0x0f, - 0xc1, 0x82, 0x88, 0x55, 0x20, 0xc9, 0xfc, 0xe3, 0x99, 0x9e, 0x37, 0xa9, 0x1c, 0x4e, 0x39, 0x0a, - 0xcd, 0x85, 0x21, 0x3a, 0xa4, 0x84, 0x0d, 0x50, 0x74, 0xb1, 0x13, 0x78, 0xad, 0x44, 0xb5, 0x16, - 0x9d, 0x45, 0x55, 0xa4, 0xad, 0xcd, 0x18, 0x43, 0xa9, 0xd5, 0xd9, 0x3e, 0x39, 0xab, 0xce, 0xbd, - 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, 
0x45, - 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xde, 0x9f, - 0xe1, 0xdf, 0xec, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0x44, 0x86, 0xf5, 0x0c, 0x0f, 0x00, + // 1169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0x36, 0xb1, 0xc7, 0x4e, 0xd2, 0x0c, 0xd0, 0x2e, 0xa5, 0xf2, 0x5a, 0xae, 0x84, + 0x82, 0x00, 0x6f, 0x9b, 0x96, 0x52, 0x71, 0x41, 0xb1, 0x29, 0x28, 0x22, 0x69, 0xa3, 0x49, 0x3f, + 0x10, 0xea, 0xa1, 0xe3, 0xf5, 0xd8, 0x1e, 0x62, 0xef, 0xac, 0x66, 0x66, 0x4d, 0x7b, 0xe3, 0x27, + 0xf0, 0x17, 0xe0, 0x4f, 0xc0, 0x95, 0x5b, 0x8f, 0xbd, 0x91, 0x03, 0x5a, 0x91, 0xe5, 0xc2, 0x81, + 0x5f, 0x90, 0x13, 0x9a, 0xd9, 0xf5, 0xae, 0xbf, 0x12, 0x56, 0x39, 0xe4, 0x94, 0x5b, 0xe6, 0x79, + 0xdf, 0xf7, 0x79, 0xe7, 0x19, 0xbf, 0x1f, 0xab, 0x80, 0xdd, 0xc3, 0xfb, 0xa2, 0x41, 0x99, 0x7d, + 0xe8, 0xb7, 0x09, 0x77, 0x89, 0x24, 0xc2, 0x1e, 0x11, 0xb7, 0xc3, 0xb8, 0x1d, 0x1b, 0xb0, 0x47, + 0x6d, 0xdc, 0x19, 0x52, 0x21, 0x28, 0x73, 0x39, 0xe9, 0x51, 0x21, 0x39, 0x96, 0x94, 0xb9, 0xf6, + 0xe8, 0xb6, 0xdd, 0x23, 0x2e, 0xe1, 0x58, 0x92, 0x4e, 0xc3, 0xe3, 0x4c, 0x32, 0x78, 0x33, 0x0a, + 0x6a, 0x60, 0x8f, 0x36, 0x16, 0x06, 0x35, 0x46, 0xb7, 0xaf, 0x7f, 0xd2, 0xa3, 0xb2, 0xef, 0xb7, + 0x1b, 0x0e, 0x1b, 0xda, 0x3d, 0xd6, 0x63, 0xb6, 0x8e, 0x6d, 0xfb, 0x5d, 0x7d, 0xd2, 0x07, 0xfd, + 0x57, 0xc4, 0x79, 0xfd, 0x6e, 0x7a, 0x91, 0x21, 0x76, 0xfa, 0xd4, 0x25, 0xfc, 0x95, 0xed, 0x1d, + 0xf6, 0x14, 0x20, 0xec, 0x21, 0x91, 0x78, 0xc1, 0x4d, 0xae, 0xdb, 0xa7, 0x45, 0x71, 0xdf, 0x95, + 0x74, 0x48, 0xe6, 0x02, 0xee, 0xfd, 0x5f, 0x80, 0x70, 0xfa, 0x64, 0x88, 0x67, 0xe3, 0xea, 0x5d, + 0xb0, 0xb6, 0x87, 0xa5, 0xd3, 0x6f, 0x31, 0xb7, 0x43, 0x95, 0x44, 0x58, 0x03, 0x79, 0x17, 0x0f, + 0x89, 0x69, 0xd4, 0x8c, 0xcd, 0x52, 0xb3, 0xf2, 0x3a, 0xb0, 0x96, 0xc2, 0xc0, 0xca, 0x3f, 0xc4, + 0x43, 0x82, 0xb4, 0x05, 0x6e, 0x01, 0x40, 0x5e, 0x7a, 0x9c, 0xe8, 0xe7, 0x31, 0x97, 0xb5, 0x1f, + 0x8c, 0xfd, 0xc0, 0x83, 0xc4, 0x82, 0x26, 0xbc, 0xea, 0xbf, 0x16, 0xc1, 0xfa, 0x9e, 0x2f, 0xb1, + 0xa4, 0x6e, 0xef, 0x19, 0x69, 0xf7, 0x19, 0x3b, 0xcc, 0x90, 0x89, 0x83, 0x8a, 0x33, 0xa0, 0xc4, + 0x95, 0x2d, 0xe6, 0x76, 0x69, 0x4f, 0xe7, 0x2a, 0x6f, 0xdd, 0x6f, 0x64, 0xf8, 0x9d, 0x1a, 0x71, + 0x96, 0xd6, 0x44, 0x7c, 0xf3, 0x9d, 0x38, 0x47, 0x65, 0x12, 0x45, 0x53, 0x39, 0xe0, 0x73, 0x50, + 0xe0, 0xfe, 0x80, 0x08, 0x33, 0x57, 0xcb, 0x6d, 0x96, 0xb7, 0x3e, 0xcb, 0x94, 0x0c, 0xf9, 0x03, + 0xf2, 0x8c, 0xca, 0xfe, 0x23, 0x8f, 0x44, 0xa0, 0x68, 0xae, 0xc6, 0xb9, 0x0a, 0xca, 0x26, 0x50, + 0x44, 0x0a, 0x77, 0xc1, 0x6a, 0x17, 0xd3, 0x81, 0xcf, 0xc9, 0x3e, 0x1b, 0x50, 0xe7, 0x95, 0x99, + 0xd7, 0xe2, 0x3f, 0x08, 0x03, 0x6b, 0xf5, 0xab, 0x49, 0xc3, 0x49, 0x60, 0x6d, 0x4c, 0x01, 0x8f, + 0x5f, 0x79, 0x04, 0x4d, 0x07, 0xc3, 0x2f, 0x41, 0x79, 0xa8, 0x7e, 0xbd, 0x98, 0xab, 0xa4, 0xb9, + 0xea, 0x61, 0x60, 0x95, 0xf7, 0x52, 0xf8, 0x24, 0xb0, 0xd6, 0x27, 0x8e, 0x9a, 0x67, 0x32, 0x0c, + 0xbe, 0x04, 0x1b, 0xea, 0xb5, 0x85, 0x87, 0x1d, 0x72, 0x40, 0x06, 0xc4, 0x91, 0x8c, 0x9b, 0x05, + 0xfd, 0xd4, 0x77, 0x26, 0xd4, 0x27, 0x75, 0xd5, 0xf0, 0x0e, 0x7b, 0x0a, 0x10, 0x0d, 0x55, 0xbe, + 0x4a, 0xfe, 0x2e, 0x6e, 0x93, 0xc1, 0x38, 0xb4, 0xf9, 0x6e, 0x18, 0x58, 0x1b, 0x0f, 0x67, 0x19, + 0xd1, 0x7c, 0x12, 0xc8, 0xc0, 0x1a, 0x6b, 0x7f, 0x4f, 0x1c, 0x99, 0xa4, 0x2d, 0x9f, 0x3f, 0x2d, + 0x0c, 0x03, 0x6b, 0xed, 0xd1, 0x14, 0x1d, 0x9a, 0xa1, 0x57, 0x0f, 0x26, 0x68, 0x87, 0x3c, 0xe8, + 0x76, 0x89, 0x23, 0x85, 0xf9, 0x56, 0xfa, 0x60, 0x07, 0x29, 0xac, 
0x1e, 0x2c, 0x3d, 0xb6, 0x06, + 0x58, 0x08, 0x34, 0x19, 0x06, 0x3f, 0x07, 0x6b, 0xaa, 0xa7, 0x98, 0x2f, 0x0f, 0x88, 0xc3, 0xdc, + 0x8e, 0x30, 0x57, 0x6a, 0xc6, 0x66, 0x21, 0xba, 0xc1, 0xe3, 0x29, 0x0b, 0x9a, 0xf1, 0x84, 0x4f, + 0xc0, 0xb5, 0xa4, 0x8a, 0x10, 0x19, 0x51, 0xf2, 0xc3, 0x53, 0xc2, 0xd5, 0x41, 0x98, 0xc5, 0x5a, + 0x6e, 0xb3, 0xd4, 0x7c, 0x3f, 0x0c, 0xac, 0x6b, 0xdb, 0x8b, 0x5d, 0xd0, 0x69, 0xb1, 0xf0, 0x05, + 0x80, 0x9c, 0x50, 0x77, 0xc4, 0x1c, 0x5d, 0x7e, 0x71, 0x41, 0x00, 0xad, 0xef, 0x56, 0x18, 0x58, + 0x10, 0xcd, 0x59, 0x4f, 0x02, 0xeb, 0xea, 0x3c, 0xaa, 0xcb, 0x63, 0x01, 0x17, 0x1c, 0x81, 0xf5, + 0xe1, 0xd4, 0xa4, 0x10, 0x66, 0x45, 0x77, 0xc8, 0x9d, 0x4c, 0x1d, 0x32, 0x3d, 0x65, 0x9a, 0xd7, + 0xe2, 0xee, 0x58, 0x9f, 0xc6, 0x05, 0x9a, 0x4d, 0x52, 0x3f, 0x32, 0xc0, 0x8d, 0x99, 0xc9, 0x11, + 0x75, 0xaa, 0x1f, 0x91, 0xc3, 0x17, 0xa0, 0xa8, 0x0a, 0xa2, 0x83, 0x25, 0xd6, 0xa3, 0xa4, 0xbc, + 0x75, 0x2b, 0x5b, 0xf9, 0x44, 0xb5, 0xb2, 0x47, 0x24, 0x4e, 0xc7, 0x57, 0x8a, 0xa1, 0x84, 0x15, + 0x3e, 0x05, 0xc5, 0x38, 0xb3, 0x30, 0x97, 0xb5, 0xe6, 0xbb, 0xd9, 0x34, 0x4f, 0x5f, 0xbb, 0x99, + 0x57, 0x59, 0x50, 0xc2, 0x55, 0xff, 0xc7, 0x00, 0xb5, 0xb3, 0xa4, 0xed, 0x52, 0x21, 0xe1, 0xf3, + 0x39, 0x79, 0x8d, 0x8c, 0xdd, 0x41, 0x45, 0x24, 0xee, 0x4a, 0x2c, 0xae, 0x38, 0x46, 0x26, 0xa4, + 0x75, 0x41, 0x81, 0x4a, 0x32, 0x1c, 0xeb, 0xda, 0x3e, 0x8f, 0xae, 0xa9, 0x3b, 0xa7, 0x73, 0x6f, + 0x47, 0xf1, 0xa2, 0x88, 0xbe, 0xfe, 0xbb, 0x01, 0xf2, 0x6a, 0x10, 0xc2, 0x8f, 0x40, 0x09, 0x7b, + 0xf4, 0x6b, 0xce, 0x7c, 0x4f, 0x98, 0x86, 0xae, 0xf8, 0xd5, 0x30, 0xb0, 0x4a, 0xdb, 0xfb, 0x3b, + 0x11, 0x88, 0x52, 0x3b, 0xbc, 0x0d, 0xca, 0xd8, 0xa3, 0x49, 0x83, 0x2c, 0x6b, 0xf7, 0x75, 0xd5, + 0xae, 0xdb, 0xfb, 0x3b, 0x49, 0x53, 0x4c, 0xfa, 0x28, 0x7e, 0x4e, 0x04, 0xf3, 0xb9, 0x13, 0x8f, + 0xf0, 0x98, 0x1f, 0x8d, 0x41, 0x94, 0xda, 0xe1, 0xc7, 0xa0, 0x20, 0x1c, 0xe6, 0x91, 0x78, 0x0a, + 0x5f, 0x55, 0xd7, 0x3e, 0x50, 0xc0, 0x49, 0x60, 0x95, 0xf4, 0x1f, 0xba, 0x1d, 0x22, 0xa7, 0xfa, + 0x2f, 0x06, 0x80, 0xf3, 0x83, 0x1e, 0x7e, 0x01, 0x00, 0x4b, 0x4e, 0xb1, 0x24, 0x4b, 0xd7, 0x52, + 0x82, 0x9e, 0x04, 0xd6, 0x6a, 0x72, 0xd2, 0x94, 0x13, 0x21, 0xf0, 0x1b, 0x90, 0x57, 0xcb, 0x21, + 0xde, 0x6e, 0x1f, 0x66, 0x5e, 0x38, 0xe9, 0xca, 0x54, 0x27, 0xa4, 0x49, 0xea, 0x3f, 0x1b, 0xe0, + 0xca, 0x01, 0xe1, 0x23, 0xea, 0x10, 0x44, 0xba, 0x84, 0x13, 0xd7, 0x21, 0xd0, 0x06, 0xa5, 0x64, + 0xf8, 0xc6, 0xeb, 0x76, 0x23, 0x8e, 0x2d, 0x25, 0x83, 0x1a, 0xa5, 0x3e, 0xc9, 0x6a, 0x5e, 0x3e, + 0x75, 0x35, 0xdf, 0x00, 0x79, 0x0f, 0xcb, 0xbe, 0x99, 0xd3, 0x1e, 0x45, 0x65, 0xdd, 0xc7, 0xb2, + 0x8f, 0x34, 0xaa, 0xad, 0x8c, 0x4b, 0xfd, 0xae, 0x85, 0xd8, 0xca, 0xb8, 0x44, 0x1a, 0xad, 0xff, + 0xb1, 0x02, 0x36, 0x9e, 0xe2, 0x01, 0xed, 0x5c, 0x7e, 0x0e, 0x5c, 0x7e, 0x0e, 0x9c, 0xf9, 0x39, + 0x00, 0x2e, 0x3f, 0x07, 0xce, 0xf5, 0x39, 0xb0, 0x60, 0x59, 0x97, 0x2f, 0x62, 0x59, 0xff, 0x69, + 0x80, 0xea, 0x5c, 0x67, 0x5f, 0xf4, 0xba, 0xfe, 0x76, 0x6e, 0x5d, 0xdf, 0xcb, 0xa4, 0x7a, 0xee, + 0xe2, 0x73, 0x0b, 0xfb, 0x5f, 0x03, 0xd4, 0xcf, 0x96, 0x77, 0x01, 0x2b, 0xbb, 0x3f, 0xbd, 0xb2, + 0x5b, 0xe7, 0xd3, 0x96, 0x65, 0x69, 0xff, 0x66, 0x80, 0xb7, 0x17, 0xcc, 0x4d, 0xf8, 0x1e, 0xc8, + 0xf9, 0x7c, 0x10, 0x8f, 0xfe, 0x95, 0x30, 0xb0, 0x72, 0x4f, 0xd0, 0x2e, 0x52, 0x18, 0x7c, 0x0e, + 0x56, 0x44, 0xb4, 0x7d, 0x62, 0xe5, 0x9f, 0x66, 0xba, 0xde, 0xec, 0xc6, 0x6a, 0x96, 0xc3, 0xc0, + 0x5a, 0x19, 0xa3, 0x63, 0x4a, 0xb8, 0x09, 0x8a, 0x0e, 0x6e, 0xfa, 0x6e, 0x27, 0xde, 0x96, 0x95, + 0x66, 0x45, 0x3d, 0x52, 0x6b, 0x3b, 0xc2, 0x50, 0x62, 0x6d, 0xee, 0xbc, 0x3e, 0xae, 0x2e, 
0xbd, + 0x39, 0xae, 0x2e, 0x1d, 0x1d, 0x57, 0x97, 0x7e, 0x0c, 0xab, 0xc6, 0xeb, 0xb0, 0x6a, 0xbc, 0x09, + 0xab, 0xc6, 0x51, 0x58, 0x35, 0xfe, 0x0a, 0xab, 0xc6, 0x4f, 0x7f, 0x57, 0x97, 0xbe, 0xbb, 0x99, + 0xe1, 0xbf, 0x04, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xe1, 0x3a, 0x73, 0x64, 0x10, 0x00, 0x00, } +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -435,6 +501,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -791,6 +871,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1036,6 +1130,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -1085,6 +1192,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1235,6 +1348,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1299,6 +1418,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", 
this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1308,6 +1438,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1320,6 +1455,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1402,6 +1538,11 @@ func (this *ValidatingWebhook) String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1413,6 +1554,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1469,6 +1611,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1853,6 +2109,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2920,6 +3210,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index a53605b58..a8903621c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -28,6 +28,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1"; +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -173,6 +202,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -205,11 +256,13 @@ message Rule { // APIGroups is the API groups the resources belong to. '*' is all groups. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string apiGroups = 1; // APIVersions is the API versions the resources belong to. '*' is all versions. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. @@ -227,6 +280,7 @@ message Rule { // // Depending on the enclosing object, subresources might not be allowed. // Required. + // +listType=atomic repeated string resources = 3; // scope specifies the scope of this rule. @@ -249,6 +303,7 @@ message RuleWithOperations { // for all of those operations and any future admission operations that are added. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string operations = 1; // Rule is embedded, it describes other criteria of the rule, like @@ -405,6 +460,28 @@ message ValidatingWebhook { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index 29873b796..07ed7a624 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -26,11 +26,13 @@ type Rule struct { // APIGroups is the API groups the resources belong to. '*' is all groups. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` // APIVersions is the API versions the resources belong to. '*' is all versions. // If '*' is present, the length of the slice must be one. // Required. 
+ // +listType=atomic APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` // Resources is a list of resources this rule applies to. @@ -48,6 +50,7 @@ type Rule struct { // // Depending on the enclosing object, subresources might not be allowed. // Required. + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // scope specifies the scope of this rule. @@ -304,6 +307,28 @@ type ValidatingWebhook struct { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -451,6 +476,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. 
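The matchConditions documentation repeated across these hunks is dense, so here is a minimal sketch of how the new field is populated from client code using the k8s.io/api/admissionregistration/v1 types touched above. The webhook name and the CEL expression are illustrative assumptions, and the other required ValidatingWebhook fields (clientConfig, sideEffects, admissionReviewVersions, rules) are deliberately omitted.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	// Skip the webhook for requests issued by kube-system service accounts.
	// 'request' is one of the CEL variables listed in the MatchCondition docs above;
	// the expression itself is only an example.
	conditions := []admissionregistrationv1.MatchCondition{
		{
			Name:       "exclude-kube-system-serviceaccounts",
			Expression: "!request.userInfo.username.startsWith('system:serviceaccount:kube-system:')",
		},
	}

	webhook := admissionregistrationv1.ValidatingWebhook{
		Name:            "example.validator.example.com", // illustrative name
		MatchConditions: conditions,
		// ClientConfig, Rules, SideEffects and AdmissionReviewVersions are still
		// required when creating a real ValidatingWebhookConfiguration; omitted here.
	}
	fmt.Printf("%q carries %d match condition(s)\n", webhook.Name, len(webhook.MatchConditions))
}

Per the matching logic quoted above, a FALSE result for any listed condition skips the webhook, the webhook is called only when every condition evaluates to TRUE, and evaluation errors fall back to the webhook's failurePolicy.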
@@ -474,6 +521,7 @@ type RuleWithOperations struct { // for all of those operations and any future admission operations that are added. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` // Rule is embedded, it describes other criteria of the rule, like // APIGroups, APIVersions, Resources, etc. @@ -559,3 +607,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c3..c41cceb2f 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. 
Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. 
* to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -111,6 +122,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index cff7377a5..b95609913 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -77,6 +93,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -286,6 +307,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go new file mode 100644 index 000000000..385c60e0d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go new file mode 100644 index 000000000..4f1373ec5 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -0,0 +1,4634 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{4} +} +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) +} +func (m *NamedRuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo + +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{5} +} +func (m *ParamKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) +} +func (m *ParamKind) XXX_Size() int { + return m.Size() +} +func (m *ParamKind) XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{6} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamRef) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) +} +func (m *ParamRef) XXX_Size() int { + return m.Size() +} +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamRef proto.InternalMessageInfo + +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{7} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{8} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{9} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{10} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{11} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{12} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{13} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m 
*ValidatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{14} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{15} +} +func (m *Validation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) +} +func (m *Validation) XXX_Size() int { + return m.Size() +} +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) +} + +var xxx_messageInfo_Validation proto.InternalMessageInfo + +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{16} +} +func (m *Variable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") + 
proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1alpha1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding") + proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList") + proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1alpha1.Variable") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptor_c3be8d256e3ae3cf) +} + +var fileDescriptor_c3be8d256e3ae3cf = []byte{ + // 1509 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0xf3, 0xf2, 0xd0, 0x2a, 0x6e, 0xa0, 0xde, 0x68, 0x55, 0xa1, + 0x46, 0x82, 0x35, 0x49, 0x0b, 0x85, 0x0a, 0x09, 0x65, 0xfb, 0xa2, 0x8f, 0x3c, 0x34, 0x45, 0x89, + 0x84, 0x40, 0x62, 0xb2, 0x3b, 0x71, 0xa6, 0xf6, 0x3e, 0xd8, 0x59, 0x9b, 0x46, 0x20, 0x51, 0x89, + 0x0b, 0xdc, 0x38, 0x70, 0xe1, 0xca, 0x9f, 0xc0, 0x7f, 0xc0, 0xad, 0xc7, 0x1e, 0xcb, 0x01, 0x8b, + 0x9a, 0x0b, 0x7f, 0x01, 0x48, 0xb9, 0x80, 0x66, 0x76, 0xf6, 0x69, 0x9b, 0xd8, 0x25, 0x70, 0xf3, + 0x7c, 0x8f, 0xdf, 0xf7, 0x98, 0xef, 0xfb, 0xf6, 0x1b, 0x03, 0xd4, 0x7c, 0x9b, 0xe9, 0xd4, 0xad, + 0x37, 0xdb, 0xfb, 0xc4, 0x77, 0x48, 0x40, 0x58, 0xbd, 0x43, 0x1c, 0xcb, 0xf5, 0xeb, 0x92, 0x81, + 0x3d, 0x5a, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x06, 0x65, 0x81, 0x8f, 0x03, 0xea, + 0x3a, 0xf5, 0xce, 0x1a, 0x6e, 0x79, 0x87, 0x78, 0xad, 0xde, 0x20, 0x0e, 0xf1, 0x71, 0x40, 0x2c, + 0xdd, 0xf3, 0xdd, 0xc0, 0x85, 0xab, 0xa1, 0xaa, 0x8e, 0x3d, 0xaa, 0x0f, 0x54, 0xd5, 0x23, 0xd5, + 0xe5, 0xd7, 0x1b, 0x34, 0x38, 0x6c, 0xef, 0xeb, 0xa6, 0x6b, 0xd7, 0x1b, 0x6e, 0xc3, 0xad, 0x0b, + 0x84, 0xfd, 0xf6, 0x81, 0x38, 0x89, 0x83, 0xf8, 0x15, 0x22, 0x2f, 0x5f, 0x1e, 0xc1, 0xa9, 0xbc, + 0x3b, 0xcb, 0x57, 0x12, 0x25, 0x1b, 0x9b, 0x87, 0xd4, 0x21, 0xfe, 0x51, 0xdd, 0x6b, 0x36, 0x38, + 0x81, 0xd5, 0x6d, 0x12, 0xe0, 0x41, 0x5a, 0xf5, 0x61, 0x5a, 0x7e, 0xdb, 0x09, 0xa8, 0x4d, 0xfa, + 0x14, 0xde, 0x3a, 0x49, 0x81, 0x99, 0x87, 0xc4, 0xc6, 0x79, 0x3d, 0x8d, 0x81, 0x85, 0x8d, 0xb6, + 0x45, 0x83, 0x0d, 0xc7, 0x71, 0x03, 0x11, 0x04, 0xbc, 0x00, 0x0a, 0x4d, 0x72, 0x54, 0x55, 0x56, + 0x94, 0x4b, 0x25, 0xa3, 0xfc, 0xa4, 0xab, 0x4e, 0xf4, 0xba, 0x6a, 0xe1, 0x1e, 0x39, 0x42, 0x9c, + 0x0e, 0x37, 0xc0, 0x42, 0x07, 0xb7, 0xda, 0xe4, 0xe6, 0x23, 0xcf, 0x27, 0x22, 0x05, 0xd5, 0x49, + 0x21, 0xba, 0x24, 0x45, 0x17, 0x76, 0xb3, 0x6c, 0x94, 0x97, 0xd7, 0x5a, 
0xa0, 0x92, 0x9c, 0xf6, + 0xb0, 0xef, 0x50, 0xa7, 0x01, 0x5f, 0x03, 0x33, 0x07, 0x94, 0xb4, 0x2c, 0x44, 0x0e, 0x24, 0xe0, + 0xa2, 0x04, 0x9c, 0xb9, 0x25, 0xe9, 0x28, 0x96, 0x80, 0xab, 0x60, 0xfa, 0xb3, 0x50, 0xb1, 0x5a, + 0x10, 0xc2, 0x0b, 0x52, 0x78, 0x5a, 0xe2, 0xa1, 0x88, 0xaf, 0x1d, 0x80, 0xf9, 0x4d, 0x1c, 0x98, + 0x87, 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x15, 0x50, 0x74, 0xb0, 0x4d, 0x64, 0x88, 0xb3, 0x52, + 0xb3, 0xb8, 0x85, 0x6d, 0x82, 0x04, 0x07, 0xae, 0x03, 0x40, 0xf2, 0xf1, 0x41, 0x29, 0x07, 0x52, + 0xa1, 0xa5, 0xa4, 0xb4, 0x9f, 0x8b, 0xd2, 0x10, 0x22, 0xcc, 0x6d, 0xfb, 0x26, 0x61, 0xf0, 0x11, + 0xa8, 0x70, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x20, 0x2d, 0x62, 0x06, 0xae, 0x2f, 0xac, 0x96, 0xd7, + 0x2f, 0xeb, 0x49, 0x9d, 0xc6, 0x37, 0xa6, 0x7b, 0xcd, 0x06, 0x27, 0x30, 0x9d, 0x17, 0x86, 0xde, + 0x59, 0xd3, 0xef, 0xe3, 0x7d, 0xd2, 0x8a, 0x54, 0x8d, 0x73, 0xbd, 0xae, 0x5a, 0xd9, 0xca, 0x23, + 0xa2, 0x7e, 0x23, 0xd0, 0x05, 0xf3, 0xee, 0xfe, 0x43, 0x62, 0x06, 0xb1, 0xd9, 0xc9, 0x17, 0x37, + 0x0b, 0x7b, 0x5d, 0x75, 0x7e, 0x3b, 0x03, 0x87, 0x72, 0xf0, 0xf0, 0x4b, 0x30, 0xe7, 0xcb, 0xb8, + 0x51, 0xbb, 0x45, 0x58, 0xb5, 0xb0, 0x52, 0xb8, 0x54, 0x5e, 0x37, 0xf4, 0x91, 0xdb, 0x51, 0xe7, + 0x81, 0x59, 0x5c, 0x79, 0x8f, 0x06, 0x87, 0xdb, 0x1e, 0x09, 0xf9, 0xcc, 0x38, 0x27, 0x13, 0x3f, + 0x87, 0xd2, 0x06, 0x50, 0xd6, 0x1e, 0xfc, 0x4e, 0x01, 0x67, 0xc9, 0x23, 0xb3, 0xd5, 0xb6, 0x48, + 0x46, 0xae, 0x5a, 0x3c, 0x35, 0x47, 0x5e, 0x91, 0x8e, 0x9c, 0xbd, 0x39, 0xc0, 0x0e, 0x1a, 0x68, + 0x1d, 0xde, 0x00, 0x65, 0x9b, 0x17, 0xc5, 0x8e, 0xdb, 0xa2, 0xe6, 0x51, 0x75, 0x5a, 0x94, 0x92, + 0xd6, 0xeb, 0xaa, 0xe5, 0xcd, 0x84, 0x7c, 0xdc, 0x55, 0x17, 0x52, 0xc7, 0x0f, 0x8e, 0x3c, 0x82, + 0xd2, 0x6a, 0xda, 0x33, 0x05, 0x2c, 0x0d, 0xf1, 0x0a, 0x5e, 0x4d, 0x32, 0x2f, 0x4a, 0xa3, 0xaa, + 0xac, 0x14, 0x2e, 0x95, 0x8c, 0x4a, 0x3a, 0x63, 0x82, 0x81, 0xb2, 0x72, 0xf0, 0x2b, 0x05, 0x40, + 0xbf, 0x0f, 0x4f, 0x16, 0xca, 0xd5, 0x51, 0xf2, 0xa5, 0x0f, 0x48, 0xd2, 0xb2, 0x4c, 0x12, 0xec, + 0xe7, 0xa1, 0x01, 0xe6, 0x34, 0x0c, 0x4a, 0x3b, 0xd8, 0xc7, 0xf6, 0x3d, 0xea, 0x58, 0xbc, 0xef, + 0xb0, 0x47, 0x77, 0x89, 0x2f, 0xfa, 0x4e, 0xc9, 0xf6, 0xdd, 0xc6, 0xce, 0x1d, 0xc9, 0x41, 0x29, + 0x29, 0xde, 0xcd, 0x4d, 0xea, 0x58, 0xb2, 0x4b, 0xe3, 0x6e, 0xe6, 0x78, 0x48, 0x70, 0xb4, 0x1f, + 0x27, 0xc1, 0x8c, 0xb0, 0xc1, 0x27, 0xc7, 0xc9, 0xcd, 0x5f, 0x07, 0xa5, 0xb8, 0xa1, 0x24, 0x6a, + 0x45, 0x8a, 0x95, 0xe2, 0xe6, 0x43, 0x89, 0x0c, 0xfc, 0x18, 0xcc, 0xb0, 0xa8, 0xcd, 0x0a, 0x2f, + 0xde, 0x66, 0xb3, 0x7c, 0xd6, 0xc5, 0x0d, 0x16, 0x43, 0xc2, 0x00, 0x2c, 0x79, 0xdc, 0x7b, 0x12, + 0x10, 0x7f, 0xcb, 0x0d, 0x6e, 0xb9, 0x6d, 0xc7, 0xda, 0x30, 0x79, 0xf6, 0xaa, 0x45, 0xe1, 0xdd, + 0xb5, 0x5e, 0x57, 0x5d, 0xda, 0x19, 0x2c, 0x72, 0xdc, 0x55, 0x5f, 0x1e, 0xc2, 0x12, 0x65, 0x36, + 0x0c, 0x5a, 0xfb, 0x5e, 0x01, 0xb3, 0x5c, 0xe2, 0xfa, 0x21, 0x31, 0x9b, 0x7c, 0x40, 0x7f, 0xad, + 0x00, 0x48, 0xf2, 0x63, 0x3b, 0xac, 0xb6, 0xf2, 0xfa, 0xbb, 0x63, 0xb4, 0x57, 0xdf, 0xec, 0x4f, + 0x6a, 0xa6, 0x8f, 0xc5, 0xd0, 0x00, 0x9b, 0xda, 0x2f, 0x93, 0xe0, 0xfc, 0x2e, 0x6e, 0x51, 0x0b, + 0x07, 0xd4, 0x69, 0x6c, 0x44, 0xe6, 0xc2, 0x66, 0x81, 0x9f, 0x80, 0x19, 0x9e, 0x60, 0x0b, 0x07, + 0x58, 0x0e, 0xdb, 0x37, 0x46, 0xbb, 0x8e, 0x70, 0xc4, 0x6d, 0x92, 0x00, 0x27, 0x45, 0x97, 0xd0, + 0x50, 0x8c, 0x0a, 0x1f, 0x82, 0x22, 0xf3, 0x88, 0x29, 0x5b, 0xe5, 0xfd, 0x31, 0x62, 0x1f, 0xea, + 0xf5, 0x03, 0x8f, 0x98, 0x49, 0x35, 0xf2, 0x13, 0x12, 0x36, 0xa0, 0x0f, 0xa6, 0x58, 0x80, 0x83, + 0x36, 0x93, 0xa5, 0x75, 0xf7, 0x54, 0xac, 0x09, 0x44, 0x63, 0x5e, 0xda, 0x9b, 0x0a, 0xcf, 0x48, + 
0x5a, 0xd2, 0xfe, 0x54, 0xc0, 0xca, 0x50, 0x5d, 0x83, 0x3a, 0x16, 0xaf, 0x87, 0xff, 0x3e, 0xcd, + 0x9f, 0x66, 0xd2, 0xbc, 0x7d, 0x1a, 0x81, 0x4b, 0xe7, 0x87, 0x65, 0x5b, 0xfb, 0x43, 0x01, 0x17, + 0x4f, 0x52, 0xbe, 0x4f, 0x59, 0x00, 0x3f, 0xea, 0x8b, 0x5e, 0x1f, 0xb1, 0xe7, 0x29, 0x0b, 0x63, + 0x8f, 0xd7, 0x9b, 0x88, 0x92, 0x8a, 0xdc, 0x03, 0x67, 0x68, 0x40, 0x6c, 0x3e, 0x8c, 0x79, 0x77, + 0xdd, 0x3b, 0xc5, 0xd0, 0x8d, 0x39, 0x69, 0xf7, 0xcc, 0x1d, 0x6e, 0x01, 0x85, 0x86, 0xb4, 0x6f, + 0x0a, 0x27, 0x07, 0xce, 0xf3, 0xc4, 0x47, 0xb4, 0x27, 0x88, 0x5b, 0xc9, 0x14, 0x8d, 0xaf, 0x71, + 0x27, 0xe6, 0xa0, 0x94, 0x14, 0x1f, 0x90, 0x9e, 0x9c, 0xbf, 0x03, 0xf6, 0x90, 0x93, 0x22, 0x8a, + 0x46, 0x77, 0x38, 0x20, 0xa3, 0x13, 0x8a, 0x21, 0x61, 0x1b, 0xcc, 0xdb, 0x99, 0xc5, 0x4b, 0xb6, + 0xca, 0x3b, 0x63, 0x18, 0xc9, 0x6e, 0x6e, 0xe1, 0xca, 0x93, 0xa5, 0xa1, 0x9c, 0x11, 0xb8, 0x07, + 0x2a, 0x1d, 0x99, 0x31, 0xd7, 0x09, 0xa7, 0x66, 0xb8, 0x6d, 0x94, 0x8c, 0x55, 0xbe, 0xa8, 0xed, + 0xe6, 0x99, 0xc7, 0x5d, 0x75, 0x31, 0x4f, 0x44, 0xfd, 0x18, 0xda, 0xef, 0x0a, 0xb8, 0x30, 0xf4, + 0x2e, 0xfe, 0x87, 0xea, 0xa3, 0xd9, 0xea, 0xbb, 0x71, 0x2a, 0xd5, 0x37, 0xb8, 0xec, 0x7e, 0x98, + 0xfa, 0x87, 0x50, 0x45, 0xbd, 0x61, 0x50, 0xf2, 0xa2, 0xfd, 0x40, 0xc6, 0x7a, 0x65, 0xdc, 0xe2, + 0xe1, 0xba, 0xc6, 0x1c, 0xff, 0x7e, 0xc7, 0x47, 0x94, 0xa0, 0xc2, 0xcf, 0xc1, 0xa2, 0x2d, 0x5f, + 0x08, 0x1c, 0x80, 0x3a, 0x41, 0xb4, 0x05, 0xfd, 0x8b, 0x0a, 0x3a, 0xdb, 0xeb, 0xaa, 0x8b, 0x9b, + 0x39, 0x58, 0xd4, 0x67, 0x08, 0xb6, 0x40, 0x39, 0xa9, 0x80, 0x68, 0x6d, 0x7e, 0xf3, 0x05, 0x52, + 0xee, 0x3a, 0xc6, 0x4b, 0x32, 0xc7, 0xe5, 0x84, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0x73, 0x07, + 0x98, 0xb6, 0xda, 0x3e, 0x91, 0x0b, 0x69, 0xb8, 0x41, 0xbc, 0xca, 0x97, 0xc5, 0x5b, 0x69, 0xc6, + 0x71, 0x57, 0xad, 0x64, 0x08, 0x62, 0x5b, 0xc8, 0x2a, 0xc3, 0xc7, 0x0a, 0x58, 0xc4, 0xd9, 0xe7, + 0x23, 0xab, 0x9e, 0x11, 0x11, 0x5c, 0x1b, 0x23, 0x82, 0xdc, 0x0b, 0xd4, 0xa8, 0xca, 0x30, 0x16, + 0x73, 0x0c, 0x86, 0xfa, 0xac, 0xc1, 0x2f, 0xc0, 0x82, 0x9d, 0x79, 0xdd, 0xb1, 0xea, 0x94, 0x70, + 0x60, 0xec, 0xab, 0x8b, 0x11, 0x92, 0x97, 0x6c, 0x96, 0xce, 0x50, 0xde, 0x14, 0xb4, 0x40, 0xa9, + 0x83, 0x7d, 0x8a, 0xf7, 0xf9, 0x43, 0x63, 0x5a, 0xd8, 0xbd, 0x3c, 0xd6, 0xd5, 0x85, 0xba, 0xc9, + 0x7e, 0x19, 0x51, 0x18, 0x4a, 0x80, 0xb5, 0x9f, 0x26, 0x81, 0x7a, 0xc2, 0xa7, 0x1c, 0xde, 0x05, + 0xd0, 0xdd, 0x67, 0xc4, 0xef, 0x10, 0xeb, 0x76, 0xf8, 0xc6, 0x8f, 0x36, 0xe8, 0x42, 0xb2, 0x5e, + 0x6d, 0xf7, 0x49, 0xa0, 0x01, 0x5a, 0xd0, 0x06, 0xb3, 0x41, 0x6a, 0xf3, 0x1b, 0xe7, 0x45, 0x20, + 0x03, 0x4b, 0x2f, 0x8e, 0xc6, 0x62, 0xaf, 0xab, 0x66, 0x56, 0x49, 0x94, 0x81, 0x87, 0x26, 0x00, + 0x66, 0x72, 0x7b, 0x61, 0x03, 0xd4, 0x47, 0x1b, 0x67, 0xc9, 0x9d, 0xc5, 0x9f, 0xa0, 0xd4, 0x75, + 0xa5, 0x60, 0xb5, 0xbf, 0x14, 0x00, 0x92, 0xae, 0x80, 0x17, 0x41, 0xea, 0x19, 0x2f, 0xbf, 0x62, + 0x45, 0x0e, 0x81, 0x52, 0x74, 0xb8, 0x0a, 0xa6, 0x6d, 0xc2, 0x18, 0x6e, 0x44, 0xef, 0x80, 0xf8, + 0x5f, 0x86, 0xcd, 0x90, 0x8c, 0x22, 0x3e, 0xdc, 0x03, 0x53, 0x3e, 0xc1, 0xcc, 0x75, 0xe4, 0xff, + 0x11, 0xef, 0xf1, 0xb5, 0x0a, 0x09, 0xca, 0x71, 0x57, 0x5d, 0x1b, 0xe5, 0x5f, 0x20, 0x5d, 0x6e, + 0x61, 0x42, 0x09, 0x49, 0x38, 0x78, 0x1b, 0x54, 0xa4, 0x8d, 0x94, 0xc3, 0x61, 0xd7, 0x9e, 0x97, + 0xde, 0x54, 0x36, 0xf3, 0x02, 0xa8, 0x5f, 0x47, 0xbb, 0x0b, 0x66, 0xa2, 0xea, 0x82, 0x55, 0x50, + 0x4c, 0x7d, 0xbe, 0xc3, 0xc0, 0x05, 0x25, 0x97, 0x98, 0xc9, 0xc1, 0x89, 0x31, 0xb6, 0x9f, 0x3c, + 0xaf, 0x4d, 0x3c, 0x7d, 0x5e, 0x9b, 0x78, 0xf6, 0xbc, 0x36, 0xf1, 0xb8, 0x57, 0x53, 0x9e, 0xf4, + 0x6a, 0xca, 0xd3, 0x5e, 
0x4d, 0x79, 0xd6, 0xab, 0x29, 0xbf, 0xf6, 0x6a, 0xca, 0xb7, 0xbf, 0xd5, + 0x26, 0x3e, 0x5c, 0x1d, 0xf9, 0x5f, 0xbc, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xad, 0xe2, 0x61, + 0x96, 0x0a, 0x14, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + 
} + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ParamKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicySpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n 
+= 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 
1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), 
"MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", 
this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, 
NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err 
:= m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := 
m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Variable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Variable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto new file mode 100644 index 000000000..db02dd929 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -0,0 +1,609 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.admissionregistration.v1alpha1; + +import "k8s.io/api/admissionregistration/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/admissionregistration/v1alpha1"; + +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. 
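
Every generated Unmarshal method above repeats the same base-128 varint pattern (`wire |= uint64(b&0x7F) << shift`, stopping when the high bit is clear) to read a field key before switching on `fieldNum`/`wireType`. A minimal standalone sketch of that decoding step, for readers unfamiliar with gogo-generated code (hypothetical helper, not part of the vendored file):

package main

import "fmt"

// decodeVarint reads a protobuf base-128 varint from buf and returns the value
// plus the number of bytes consumed. It mirrors the inline loops in the
// generated Unmarshal methods above (illustrative helper, not vendored code).
func decodeVarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                // high bit clear: last byte of the varint
			return v, n, nil
		}
	}
	return 0, 0, fmt.Errorf("varint overflows 64 bits")
}

func main() {
	// 0x0A encodes field number 1 with wire type 2 (length-delimited),
	// which is how a string field such as Key above is framed.
	key, n, _ := decodeVarint([]byte{0x0A})
	fmt.Printf("fieldNum=%d wireType=%d (consumed %d byte)\n", key>>3, key&0x7, n)
}
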
The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. 
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. 
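
The MatchCondition and MatchResources comments above map directly onto the vendored Go types. A minimal sketch of the "runlevel" namespaceSelector example and a CEL match condition (illustrative only, not part of this patch; the condition name, CEL expression, and label values are made up):

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A CEL condition over the AdmissionRequest, as described above.
	skipNodes := admissionregistrationv1alpha1.MatchCondition{
		Name:       "exclude-node-requests",
		Expression: "!('system:nodes' in request.userInfo.groups)",
	}

	// Go equivalent of the first JSON namespaceSelector example above:
	// skip objects whose namespace carries runlevel=0 or runlevel=1.
	match := admissionregistrationv1alpha1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "runlevel",
				Operator: metav1.LabelSelectorOpNotIn,
				Values:   []string{"0", "1"},
			}},
		},
	}

	fmt.Println(skipNodes.Name, match.NamespaceSelector)
}
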
+ // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + optional string apiVersion = 1; + + // Kind is the API kind the resources belong to. + // Required. + optional string kind = 2; +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // `name` is the name of the resource being referenced. + // + // `name` and `selector` are mutually exclusive properties. If one is set, + // the other must be unset. + // + // +optional + optional string name = 1; + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + optional string namespace = 2; + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. 
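+ //
+ // For example (illustrative only; the label key and value are hypothetical), a selector
+ // matching all params labeled environment=test could be written as:
+ // "selector": { "matchLabels": { "environment": "test" } }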
+ // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // Default to `Deny` + // +optional + optional string parameterNotFoundAction = 4; +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. 
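+//
+// For example (illustrative only; the policy name is hypothetical), a minimal binding
+// could set:
+// "policyName": "demo-policy.example.com",
+// "validationActions": ["Deny"],
+// "matchResources": {"namespaceSelector": {"matchLabels": {"environment": "test"}}}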
+message ValidatingAdmissionPolicyBindingSpec {
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ optional string policyName = 1;
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ optional ParamRef paramRef = 2;
+
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding.
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // +optional
+ optional MatchResources matchResources = 3;
+
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported action values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // for both allowed and denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. 
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. 
+ // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. 
+ // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. 
+ // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + optional string Expression = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go new file mode 100644 index 000000000..d4c2fbe80 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. 
+const GroupName = "admissionregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+var (
+ // SchemeBuilder points to a list of functions added to Scheme.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme.
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ValidatingAdmissionPolicy{},
+ &ValidatingAdmissionPolicyList{},
+ &ValidatingAdmissionPolicyBinding{},
+ &ValidatingAdmissionPolicyBindingList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
new file mode 100644
index 000000000..575456c83
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -0,0 +1,665 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/admissionregistration/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
+type Rule = v1.Rule
+
+// ScopeType specifies a scope for a Rule.
+// +enum
+type ScopeType = v1.ScopeType
+
+const (
+ // ClusterScope means that scope is limited to cluster-scoped objects.
+ // Namespace objects are cluster-scoped.
+ ClusterScope ScopeType = v1.ClusterScope
+ // NamespacedScope means that scope is limited to namespaced objects.
+ NamespacedScope ScopeType = v1.NamespacedScope
+ // AllScopes means that all scopes are included.
+ AllScopes ScopeType = v1.AllScopes
+)
+
+// ParameterNotFoundActionType specifies a failure policy that defines how a binding
+// is evaluated when the param referred to by its paramRef is not found.
+// +enum
+type ParameterNotFoundActionType string
+
+const (
+ // AllowAction means that the absence of matched params is treated as a successful validation by the binding.
+ AllowAction ParameterNotFoundActionType = "Allow"
+ // DenyAction means that the absence of matched params is subject to the failurePolicy of the policy.
+ DenyAction ParameterNotFoundActionType = "Deny"
+)
+
+// FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. 
+// +enum +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy. +// +enum +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule. + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. 
+ // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. 
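+ //
+ // For example (illustrative only; the key and expression are hypothetical), an audit
+ // annotation with key "high-replica-count" and valueExpression
+ // "'Deployment spec.replicas set to ' + string(object.spec.replicas)" would record the
+ // observed replica count in the audit event.
+ //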
+ // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +type MatchCondition v1.MatchCondition + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". 
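+ // For illustration (assuming the standard metav1 status reason to HTTP code mapping),
+ // "Unauthorized" corresponds to HTTP 401, "Forbidden" to 403, "Invalid" to 422, and
+ // "RequestEntityTooLarge" to 413.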
+ // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. 
+ // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. 
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. 
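+//
+// For example (illustrative only; the label key and value are hypothetical), a binding
+// could select params by label instead of by name:
+//
+//   paramRef:
+//     selector:
+//       matchLabels:
+//         environment: test
+//     parameterNotFoundAction: Deny
+//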
+// +structType=atomic +type ParamRef struct { + // `name` is the name of the resource being referenced. + // + // `name` and `selector` are mutually exclusive properties. If one is set, + // the other must be unset. + // + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // Default to `Deny` + // +optional + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. + RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations = v1.RuleWithOperations + +// OperationType specifies an operation for a request. +// +enum +type OperationType = v1.OperationType + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = v1.OperationAll + Create OperationType = v1.Create + Update OperationType = v1.Update + Delete OperationType = v1.Delete + Connect OperationType = v1.Connect +) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..dcf46b324 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,204 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
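(Editorial aside — the following sketch is not part of the vendored diff above, nor of the generated file whose header comment continues below. It only illustrates, under stated assumptions, how the v1alpha1 match/param types introduced in types.go might be populated by client code. The field names, the metav1 import, and the package path k8s.io/api/admissionregistration/v1alpha1 come from the diff itself; the string values "Equivalent" and "Deny" are taken from the field documentation; the package alias, the main function, and the use of string conversions instead of the package constants (which are defined outside the quoted hunks) are assumptions.)

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "Equivalent" is the documented default for matchPolicy; "Deny" is the
	// documented default for parameterNotFoundAction. Both appear to be
	// string-backed types, so conversions are used here rather than the
	// package constants, which are declared outside the quoted hunks.
	matchPolicy := admissionv1alpha1.MatchPolicyType("Equivalent")
	notFound := admissionv1alpha1.ParameterNotFoundActionType("Deny")

	// Match only objects whose namespace carries environment=prod or
	// environment=staging, mirroring the example given in the
	// NamespaceSelector field documentation above.
	match := admissionv1alpha1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "environment",
				Operator: metav1.LabelSelectorOpIn,
				Values:   []string{"prod", "staging"},
			}},
		},
		MatchPolicy: &matchPolicy,
	}

	// name and selector are mutually exclusive on ParamRef; per the docs,
	// an empty selector matches every param object of the ParamKind in the
	// given namespace.
	params := admissionv1alpha1.ParamRef{
		Namespace:               "default",
		Selector:                &metav1.LabelSelector{},
		ParameterNotFoundAction: &notFound,
	}

	fmt.Printf("matchResources=%+v paramRef=%+v\n", match, params)
}

(End of editorial aside; the generated swagger documentation file resumes below.)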
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. 
An empty set means that everything is allowed.", +} + +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations +} + +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. Required.", +} + +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "`name` is the name of the resource being referenced.\n\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny` Default to `Deny`", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef +} + +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. 
Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. 
If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. 
However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. 
Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + +var map_Validation = map[string]string{ + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", +} + +func (Validation) SwaggerDoc() map[string]string { + return map_Validation +} + +var map_Variable = map[string]string{ + "": "Variable is the definition of a variable that is used for composition.", + "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", +} + +func (Variable) SwaggerDoc() map[string]string { + return map_Variable +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..24cd0e4e9 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,475 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { + *out = *in + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil + } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { + if in == nil { + return nil + } + out := new(ParamKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamRef) DeepCopyInto(out *ParamRef) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { + if in == nil { + return nil + } + out := new(ParamRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. +func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. +func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(v1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index e2899ea17..267ddc1cb 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/admissionregistration/v1" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -44,10 +46,122 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m 
*MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{4} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +189,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{5} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +217,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_abeea74cbc46f55a, []int{6} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -128,15 +242,15 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{7} } -func (m *Rule) XXX_Unmarshal(b []byte) error { +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Rule) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -144,27 +258,55 @@ func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *Rule) XXX_Merge(src proto.Message) { - xxx_messageInfo_Rule.Merge(m, src) +func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) } -func (m *Rule) XXX_Size() int { +func (m *NamedRuleWithOperations) XXX_Size() int { return m.Size() } -func (m *Rule) XXX_DiscardUnknown() { - xxx_messageInfo_Rule.DiscardUnknown(m) +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) } -var xxx_messageInfo_Rule proto.InternalMessageInfo +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ParamKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) +} +func (m *ParamKind) XXX_Size() int { + return m.Size() +} +func (m *ParamKind) XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -172,22 +314,22 @@ func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *RuleWithOperations) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleWithOperations.Merge(m, src) +func (m *ParamRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) } -func (m *RuleWithOperations) XXX_Size() int { +func (m *ParamRef) XXX_Size() int { return m.Size() } -func (m *RuleWithOperations) XXX_DiscardUnknown() { - xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) } -var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo +var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_abeea74cbc46f55a, 
[]int{10} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -212,10 +354,234 @@ func (m *ServiceReference) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceReference proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{11} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{12} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{13} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{14} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{15} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{16} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{17} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Size() int { + return 
m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{18} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{19} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +609,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_abeea74cbc46f55a, []int{20} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +637,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{8} + return fileDescriptor_abeea74cbc46f55a, []int{21} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -296,15 +662,15 @@ func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{9} +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{22} } -func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { +func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -312,109 +678,415 @@ func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) } -func (m *WebhookClientConfig) XXX_Size() int { +func (m *Validation) XXX_Size() int { return m.Size() } -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) } -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") - proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") - proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") - proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") - proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") - proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") -} +var xxx_messageInfo_Validation proto.InternalMessageInfo -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{23} } - -var fileDescriptor_abeea74cbc46f55a = []byte{ - // 1112 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4b, 0x6f, 0x23, 0x45, - 0x10, 0xce, 0xc4, 0xf6, 0xc6, 0x6e, 0xe7, 0xb1, 0x69, 0x60, 0xd7, 0x84, 0x95, 0xc7, 0xf2, 0x01, - 0x59, 0x02, 0x66, 0x36, 0x01, 0x21, 0x58, 0x40, 0x28, 0x0e, 0x2c, 0x44, 0x4a, 0xb2, 0xa1, 0xb3, - 0x0f, 0x89, 0x87, 0xb4, 0xed, 0x71, 0xd9, 0x6e, 0x6c, 0x4f, 0x8f, 0xa6, 0x7b, 0xbc, 0xe4, 0xc6, - 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, 0x32, 0x9c, - 0x38, 0x70, 0xe0, 0x9a, 0x13, 0xea, 0x9e, 0xf1, 0xf8, 0x95, 0x2c, 0x26, 0x48, 0x7b, 0xca, 0xcd, - 0xfd, 0x55, 0xd7, 0x57, 0x5d, 0x35, 0x55, 0xf5, 0xc9, 0xe8, 0x8b, 0xee, 0x7b, 0xc2, 0x62, 0xdc, - 0xee, 0x06, 0x0d, 0xf0, 0x5d, 0x90, 0x20, 0xec, 0x01, 0xb8, 0x4d, 0xee, 0xdb, 0x89, 0x81, 0x7a, - 0xcc, 0xa6, 0xcd, 0x3e, 0x13, 0x82, 0x71, 0xd7, 0x87, 0x36, 0x13, 0xd2, 0xa7, 0x92, 0x71, 0xd7, - 0x1e, 0x6c, 0x36, 0x40, 0xd2, 0x4d, 0xbb, 0x0d, 0x2e, 0xf8, 0x54, 0x42, 0xd3, 0xf2, 0x7c, 0x2e, - 0x39, 0xae, 0xc5, 0x9e, 0x16, 0xf5, 0x98, 0x75, 0xae, 0xa7, 0x95, 
0x78, 0x6e, 0xbc, 0xd5, 0x66, - 0xb2, 0x13, 0x34, 0x2c, 0x87, 0xf7, 0xed, 0x36, 0x6f, 0x73, 0x5b, 0x13, 0x34, 0x82, 0x96, 0x3e, - 0xe9, 0x83, 0xfe, 0x15, 0x13, 0x6f, 0xbc, 0x33, 0x7a, 0x52, 0x9f, 0x3a, 0x1d, 0xe6, 0x82, 0x7f, - 0x6c, 0x7b, 0xdd, 0xb6, 0x02, 0x84, 0xdd, 0x07, 0x49, 0xed, 0xc1, 0xcc, 0x73, 0x36, 0xec, 0x8b, - 0xbc, 0xfc, 0xc0, 0x95, 0xac, 0x0f, 0x33, 0x0e, 0xef, 0xfe, 0x9b, 0x83, 0x70, 0x3a, 0xd0, 0xa7, - 0xd3, 0x7e, 0xd5, 0x5f, 0x97, 0xd0, 0xda, 0x7e, 0x20, 0xa9, 0x64, 0x6e, 0xfb, 0x11, 0x34, 0x3a, - 0x9c, 0x77, 0x71, 0x05, 0x65, 0x5d, 0xda, 0x87, 0x92, 0x51, 0x31, 0x6a, 0x85, 0xfa, 0xf2, 0x49, - 0x68, 0x2e, 0x44, 0xa1, 0x99, 0x3d, 0xa0, 0x7d, 0x20, 0xda, 0x82, 0x9f, 0xa0, 0x65, 0xa7, 0xc7, - 0xc0, 0x95, 0x3b, 0xdc, 0x6d, 0xb1, 0x76, 0x69, 0xb1, 0x62, 0xd4, 0x8a, 0x5b, 0x1f, 0x59, 0xf3, - 0x16, 0xd1, 0x4a, 0x42, 0xed, 0x8c, 0x91, 0xd4, 0x5f, 0x4e, 0x02, 0x2d, 0x8f, 0xa3, 0x64, 0x22, - 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x3d, 0x10, 0xa5, 0x4c, 0x25, 0x53, 0x2b, 0x6e, 0x7d, 0x38, 0x7f, - 0x44, 0x12, 0xf4, 0xe0, 0x11, 0x93, 0x9d, 0x7b, 0x1e, 0xc4, 0x16, 0x51, 0x5f, 0x49, 0x02, 0xe6, - 0x94, 0x4d, 0x90, 0x98, 0x19, 0xef, 0xa1, 0x95, 0x16, 0x65, 0xbd, 0xc0, 0x87, 0x43, 0xde, 0x63, - 0xce, 0x71, 0x29, 0xab, 0xcb, 0xf0, 0x7a, 0x14, 0x9a, 0x2b, 0x77, 0xc7, 0x0d, 0x67, 0xa1, 0xb9, - 0x3e, 0x01, 0xdc, 0x3f, 0xf6, 0x80, 0x4c, 0x3a, 0xe3, 0x4f, 0x50, 0xb1, 0x4f, 0xa5, 0xd3, 0x49, - 0xb8, 0x0a, 0x9a, 0xab, 0x1a, 0x85, 0x66, 0x71, 0x7f, 0x04, 0x9f, 0x85, 0xe6, 0xda, 0xd8, 0x51, - 0xf3, 0x8c, 0xbb, 0xe1, 0xef, 0xd0, 0xba, 0xaa, 0xbb, 0xf0, 0xa8, 0x03, 0x47, 0xd0, 0x03, 0x47, - 0x72, 0xbf, 0x94, 0xd3, 0x45, 0x7f, 0x7b, 0xac, 0x04, 0xe9, 0x97, 0xb7, 0xbc, 0x6e, 0x5b, 0x01, - 0xc2, 0x52, 0x0d, 0x66, 0x0d, 0x36, 0xad, 0x3d, 0xda, 0x80, 0xde, 0xd0, 0xb5, 0xfe, 0x4a, 0x14, - 0x9a, 0xeb, 0x07, 0xd3, 0x8c, 0x64, 0x36, 0x08, 0xe6, 0x68, 0x95, 0x37, 0xbe, 0x05, 0x47, 0xa6, - 0x61, 0x8b, 0x97, 0x0f, 0x8b, 0xa3, 0xd0, 0x5c, 0xbd, 0x37, 0x41, 0x47, 0xa6, 0xe8, 0x55, 0xc1, - 0x04, 0x6b, 0xc2, 0xa7, 0xad, 0x16, 0x38, 0x52, 0x94, 0xae, 0x8d, 0x0a, 0x76, 0x34, 0x82, 0x55, - 0xc1, 0x46, 0xc7, 0x9d, 0x1e, 0x15, 0x82, 0x8c, 0xbb, 0xe1, 0x3b, 0x68, 0x55, 0x75, 0x3d, 0x0f, - 0xe4, 0x11, 0x38, 0xdc, 0x6d, 0x8a, 0xd2, 0x52, 0xc5, 0xa8, 0xe5, 0xe2, 0x17, 0xdc, 0x9f, 0xb0, - 0x90, 0xa9, 0x9b, 0xf8, 0x01, 0xba, 0x99, 0xb6, 0x12, 0x81, 0x01, 0x83, 0x27, 0x0f, 0xc1, 0x57, - 0x07, 0x51, 0xca, 0x57, 0x32, 0xb5, 0x42, 0xfd, 0xb5, 0x28, 0x34, 0x6f, 0x6e, 0x9f, 0x7f, 0x85, - 0x5c, 0xe4, 0x8b, 0x1f, 0x23, 0xec, 0x03, 0x73, 0x07, 0xdc, 0xd1, 0xed, 0x97, 0x34, 0x04, 0xd2, - 0xf9, 0xdd, 0x8e, 0x42, 0x13, 0x93, 0x19, 0xeb, 0x59, 0x68, 0xde, 0x98, 0x45, 0x75, 0x7b, 0x9c, - 0xc3, 0x55, 0xfd, 0xcd, 0x40, 0xb7, 0xa6, 0x66, 0x39, 0x1e, 0x9b, 0x20, 0xee, 0x78, 0xfc, 0x18, - 0xe5, 0xd5, 0x87, 0x69, 0x52, 0x49, 0xf5, 0x70, 0x17, 0xb7, 0x6e, 0xcf, 0xf7, 0x19, 0xe3, 0x6f, - 0xb6, 0x0f, 0x92, 0xd6, 0x71, 0x32, 0x34, 0x68, 0x84, 0x91, 0x94, 0x15, 0x7f, 0x85, 0xf2, 0x49, - 0x64, 0x51, 0x5a, 0xd4, 0x23, 0xfa, 0xfe, 0xfc, 0x23, 0x3a, 0xf5, 0xf6, 0x7a, 0x56, 0x85, 0x22, - 0x29, 0x61, 0xf5, 0x2f, 0x03, 0x55, 0x9e, 0x97, 0xdf, 0x1e, 0x13, 0x12, 0x7f, 0x3d, 0x93, 0xa3, - 0x35, 0x67, 0xab, 0x32, 0x11, 0x67, 0x78, 0x3d, 0xc9, 0x30, 0x3f, 0x44, 0xc6, 0xf2, 0xeb, 0xa2, - 0x1c, 0x93, 0xd0, 0x1f, 0x26, 0x77, 0xf7, 0xd2, 0xc9, 0x4d, 0x3c, 0x7c, 0xb4, 0x89, 0x76, 0x15, - 0x39, 0x89, 0x63, 0x54, 0x7f, 0x31, 0x50, 0x56, 0xad, 0x26, 0xfc, 0x06, 0x2a, 0x50, 0x8f, 0x7d, - 0xe6, 0xf3, 0xc0, 0x13, 0x25, 0x43, 0xf7, 0xe0, 0x4a, 0x14, 0x9a, 0x85, 0xed, 0xc3, 0xdd, 
0x18, - 0x24, 0x23, 0x3b, 0xde, 0x44, 0x45, 0xea, 0xb1, 0xb4, 0x65, 0x17, 0xf5, 0xf5, 0x35, 0x35, 0x40, - 0xdb, 0x87, 0xbb, 0x69, 0x9b, 0x8e, 0xdf, 0x51, 0xfc, 0x3e, 0x08, 0x1e, 0xf8, 0x4e, 0xb2, 0x59, - 0x13, 0x7e, 0x32, 0x04, 0xc9, 0xc8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, 0x0f, 0x92, 0xbd, 0x78, - 0x43, 0x3d, 0xfb, 0x48, 0x01, 0x67, 0xa1, 0x59, 0xd0, 0x3f, 0x74, 0x83, 0xc6, 0x97, 0xaa, 0x3f, - 0x19, 0x08, 0xcf, 0xae, 0x5e, 0xfc, 0x31, 0x42, 0x3c, 0x3d, 0x25, 0x29, 0x99, 0xba, 0xab, 0x52, - 0xf4, 0x2c, 0x34, 0x57, 0xd2, 0x93, 0xa6, 0x1c, 0x73, 0xc1, 0x87, 0x28, 0xab, 0xd6, 0x75, 0xa2, - 0x3c, 0xd6, 0x7f, 0xd3, 0x81, 0x91, 0xa6, 0xa9, 0x13, 0xd1, 0x4c, 0xd5, 0x1f, 0x0d, 0x74, 0xfd, - 0x08, 0xfc, 0x01, 0x73, 0x80, 0x40, 0x0b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, 0x2a, 0xa4, 0x3b, 0x31, - 0xd1, 0xc3, 0xf5, 0xc4, 0xb7, 0x90, 0xee, 0x4f, 0x32, 0xba, 0x93, 0x6a, 0xe7, 0xe2, 0x85, 0xda, - 0x79, 0x0b, 0x65, 0x3d, 0x2a, 0x3b, 0xa5, 0x8c, 0xbe, 0x91, 0x57, 0xd6, 0x43, 0x2a, 0x3b, 0x44, - 0xa3, 0xda, 0xca, 0x7d, 0xa9, 0x8b, 0x9b, 0x4b, 0xac, 0xdc, 0x97, 0x44, 0xa3, 0xd5, 0x3f, 0xaf, - 0xa1, 0xf5, 0x87, 0xb4, 0xc7, 0x9a, 0x57, 0x7a, 0x7d, 0xa5, 0xd7, 0x73, 0xea, 0x35, 0xba, 0xd2, - 0xeb, 0xcb, 0xe8, 0x75, 0xf5, 0xd4, 0x40, 0xe5, 0x99, 0x59, 0x7b, 0xd1, 0x7a, 0xfa, 0xcd, 0x8c, - 0x9e, 0x7e, 0x30, 0xff, 0x08, 0xcd, 0xbc, 0x7e, 0x46, 0x51, 0xff, 0x36, 0x50, 0xf5, 0xf9, 0x39, - 0xbe, 0x00, 0x4d, 0xed, 0x4f, 0x6a, 0xea, 0xe7, 0xff, 0x23, 0xc1, 0x79, 0x54, 0xf5, 0x67, 0x03, - 0xbd, 0x74, 0xce, 0x3a, 0xc3, 0xaf, 0xa2, 0x4c, 0xe0, 0xf7, 0x92, 0xb5, 0xbc, 0x14, 0x85, 0x66, - 0xe6, 0x01, 0xd9, 0x23, 0x0a, 0xc3, 0x14, 0x2d, 0x89, 0x58, 0x19, 0x92, 0xf4, 0xef, 0xcc, 0xff, - 0xc6, 0x69, 0x49, 0xa9, 0x17, 0xa3, 0xd0, 0x5c, 0x1a, 0xa2, 0x43, 0x5e, 0x5c, 0x43, 0x79, 0x87, - 0xd6, 0x03, 0xb7, 0x99, 0x68, 0xda, 0x72, 0x7d, 0x59, 0x95, 0x6b, 0x67, 0x3b, 0xc6, 0x48, 0x6a, - 0xad, 0x1f, 0x9c, 0x9c, 0x96, 0x17, 0x9e, 0x9e, 0x96, 0x17, 0x9e, 0x9d, 0x96, 0x17, 0xbe, 0x8f, - 0xca, 0xc6, 0x49, 0x54, 0x36, 0x9e, 0x46, 0x65, 0xe3, 0x59, 0x54, 0x36, 0x7e, 0x8f, 0xca, 0xc6, - 0x0f, 0x7f, 0x94, 0x17, 0xbe, 0xac, 0xcd, 0xfb, 0x4f, 0xf8, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x29, 0xe6, 0x3d, 0x13, 0x4d, 0x0f, 0x00, 0x00, +func (m *Variable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{24} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) 
XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources") + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind") + proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1beta1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding") + proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList") + proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1beta1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1beta1.Variable") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1973 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x23, 0x49, + 0x35, 0x1d, 0xe7, 0xc3, 0x7e, 0xce, 0x97, 0x6b, 
0x67, 0x89, 0x77, 0x76, 0xd6, 0x8e, 0x5a, 0x2b, + 0x94, 0x91, 0xc0, 0xde, 0xc9, 0xae, 0x76, 0x97, 0x59, 0x21, 0x14, 0x67, 0x67, 0x86, 0x99, 0x9d, + 0x64, 0x42, 0x65, 0x37, 0x91, 0x60, 0x57, 0x9a, 0x72, 0x77, 0xd9, 0x6e, 0x6c, 0x77, 0x37, 0x5d, + 0x6d, 0xcf, 0x04, 0x24, 0x40, 0xe2, 0xb0, 0x57, 0x24, 0x2e, 0x48, 0x9c, 0xf8, 0x0b, 0xdc, 0x91, + 0xe0, 0x36, 0xc7, 0xbd, 0x31, 0x12, 0xc2, 0x22, 0xe6, 0xc0, 0x89, 0x03, 0x07, 0x38, 0xe4, 0x02, + 0xaa, 0xea, 0xea, 0x4f, 0xb7, 0x27, 0x9d, 0x90, 0x09, 0x97, 0xb9, 0xa5, 0xdf, 0x67, 0xbd, 0x57, + 0xef, 0xab, 0x9e, 0x03, 0xdf, 0xeb, 0x7e, 0xc8, 0x6a, 0x86, 0x55, 0xef, 0x0e, 0x9a, 0xd4, 0x31, + 0xa9, 0x4b, 0x59, 0x7d, 0x48, 0x4d, 0xdd, 0x72, 0xea, 0x12, 0x41, 0x6c, 0xa3, 0x4e, 0xf4, 0xbe, + 0xc1, 0x98, 0x61, 0x99, 0x0e, 0x6d, 0x1b, 0xcc, 0x75, 0x88, 0x6b, 0x58, 0x66, 0x7d, 0x78, 0xab, + 0x49, 0x5d, 0x72, 0xab, 0xde, 0xa6, 0x26, 0x75, 0x88, 0x4b, 0xf5, 0x9a, 0xed, 0x58, 0xae, 0x85, + 0x36, 0x3d, 0xce, 0x1a, 0xb1, 0x8d, 0x5a, 0x2a, 0x67, 0x4d, 0x72, 0x5e, 0xff, 0x66, 0xdb, 0x70, + 0x3b, 0x83, 0x66, 0x4d, 0xb3, 0xfa, 0xf5, 0xb6, 0xd5, 0xb6, 0xea, 0x42, 0x40, 0x73, 0xd0, 0x12, + 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x6e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, 0xbd, + 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xb8, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x7d, + 0xea, 0x92, 0x34, 0xae, 0xfa, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0xbc, 0x7f, + 0x16, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xd5, 0xed, 0x81, 0x6e, 0xb8, 0xdb, + 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x16, 0xe4, 0xba, 0xf4, 0xb8, 0xac, 0x6c, 0x28, 0x9b, 0x85, + 0x46, 0xf1, 0xd9, 0xa8, 0x3a, 0x33, 0x1e, 0x55, 0x73, 0x9f, 0xd0, 0x63, 0xcc, 0xe1, 0x68, 0x1b, + 0x56, 0x87, 0xa4, 0x37, 0xa0, 0x77, 0x9e, 0xda, 0x0e, 0x15, 0x2e, 0x28, 0xcf, 0x0a, 0xd2, 0x75, + 0x49, 0xba, 0x7a, 0x18, 0x47, 0xe3, 0x24, 0xbd, 0xda, 0x83, 0x52, 0xf8, 0x75, 0x44, 0x1c, 0xd3, + 0x30, 0xdb, 0xe8, 0x1b, 0x90, 0x6f, 0x19, 0xb4, 0xa7, 0x63, 0xda, 0x92, 0x02, 0xd7, 0xa4, 0xc0, + 0xfc, 0x5d, 0x09, 0xc7, 0x01, 0x05, 0xba, 0x09, 0x8b, 0x4f, 0x3c, 0xc6, 0x72, 0x4e, 0x10, 0xaf, + 0x4a, 0xe2, 0x45, 0x29, 0x0f, 0xfb, 0x78, 0xb5, 0x05, 0x2b, 0xbb, 0xc4, 0xd5, 0x3a, 0x3b, 0x96, + 0xa9, 0x1b, 0xc2, 0xc2, 0x0d, 0x98, 0x33, 0x49, 0x9f, 0x4a, 0x13, 0x97, 0x24, 0xe7, 0xdc, 0x1e, + 0xe9, 0x53, 0x2c, 0x30, 0x68, 0x0b, 0x80, 0x26, 0xed, 0x43, 0x92, 0x0e, 0x22, 0xa6, 0x45, 0xa8, + 0xd4, 0x3f, 0xcd, 0x49, 0x45, 0x98, 0x32, 0x6b, 0xe0, 0x68, 0x94, 0xa1, 0xa7, 0x50, 0xe2, 0xe2, + 0x98, 0x4d, 0x34, 0x7a, 0x40, 0x7b, 0x54, 0x73, 0x2d, 0x47, 0x68, 0x2d, 0x6e, 0xbd, 0x5b, 0x0b, + 0xc3, 0x34, 0xb8, 0xb1, 0x9a, 0xdd, 0x6d, 0x73, 0x00, 0xab, 0xf1, 0xc0, 0xa8, 0x0d, 0x6f, 0xd5, + 0x1e, 0x92, 0x26, 0xed, 0xf9, 0xac, 0x8d, 0xd7, 0xc7, 0xa3, 0x6a, 0x69, 0x2f, 0x29, 0x11, 0x4f, + 0x2a, 0x41, 0x16, 0xac, 0x58, 0xcd, 0x1f, 0x52, 0xcd, 0x0d, 0xd4, 0xce, 0x5e, 0x5c, 0x2d, 0x1a, + 0x8f, 0xaa, 0x2b, 0x8f, 0x62, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x29, 0x2c, 0x3b, 0xd2, 0x6e, 0x3c, + 0xe8, 0x51, 0x56, 0xce, 0x6d, 0xe4, 0x36, 0x8b, 0x5b, 0xdb, 0xb5, 0xac, 0xd9, 0x58, 0xe3, 0x76, + 0xe9, 0x9c, 0xf7, 0xc8, 0x70, 0x3b, 0x8f, 0x6c, 0xea, 0xa1, 0x59, 0xe3, 0x75, 0xe9, 0xf7, 0x65, + 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x14, 0xb8, 0x46, 0x9f, 0x6a, 0xbd, 0x81, 0x4e, 0x63, + 0x74, 0xe5, 0xb9, 0xcb, 0x3a, 0xc7, 0x0d, 0x79, 0x8e, 0x6b, 0x77, 0x52, 0xd4, 0xe0, 0x54, 0xe5, + 0xe8, 0x63, 0x28, 0xf6, 0x79, 0x48, 0xec, 0x5b, 0x3d, 0x43, 0x3b, 0x2e, 0x2f, 0x8a, 0x40, 0x52, + 0xc7, 0xa3, 0x6a, 0x71, 0x37, 0x04, 0x9f, 0x8e, 0xaa, 0xab, 0x91, 0xcf, 
0x4f, 0x8f, 0x6d, 0x8a, + 0xa3, 0x6c, 0xea, 0x1f, 0xf3, 0xb0, 0xba, 0x3b, 0xe0, 0xe9, 0x69, 0xb6, 0x8f, 0x68, 0xb3, 0x63, + 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x04, 0x96, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xc7, 0x32, 0x5b, 0x46, + 0x5b, 0x06, 0xc0, 0xb7, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x89, 0x08, 0x69, 0x5c, 0x93, 0x8a, 0x96, + 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x79, 0x27, 0x12, 0x02, 0x1f, 0x64, 0xd1, 0x58, 0x4b, + 0x71, 0xf8, 0xb2, 0xd4, 0x35, 0xef, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc2, 0x72, 0x8b, 0x18, 0xbd, + 0x81, 0x43, 0xa5, 0x53, 0xe7, 0x84, 0x07, 0xbe, 0xce, 0x23, 0xe4, 0x6e, 0x14, 0x71, 0x3a, 0xaa, + 0x96, 0x62, 0x00, 0xe1, 0xd8, 0x38, 0x73, 0xf2, 0x82, 0x0a, 0x17, 0xba, 0xa0, 0xf4, 0x3c, 0x9f, + 0xff, 0xff, 0xe4, 0x79, 0xf1, 0xe5, 0xe6, 0xf9, 0xc7, 0x50, 0x64, 0x86, 0x4e, 0xef, 0xb4, 0x5a, + 0x54, 0x73, 0x59, 0x79, 0x21, 0x74, 0xd8, 0x41, 0x08, 0xe6, 0x0e, 0x0b, 0x3f, 0x77, 0x7a, 0x84, + 0x31, 0x1c, 0x65, 0x43, 0xb7, 0x61, 0x85, 0x77, 0x25, 0x6b, 0xe0, 0x1e, 0x50, 0xcd, 0x32, 0x75, + 0x26, 0x52, 0x63, 0xde, 0x3b, 0xc1, 0xa7, 0x31, 0x0c, 0x4e, 0x50, 0xa2, 0xcf, 0x60, 0x3d, 0x88, + 0x22, 0x4c, 0x87, 0x06, 0x7d, 0x72, 0x48, 0x1d, 0xfe, 0xc1, 0xca, 0xf9, 0x8d, 0xdc, 0x66, 0xa1, + 0xf1, 0xe6, 0x78, 0x54, 0x5d, 0xdf, 0x4e, 0x27, 0xc1, 0xd3, 0x78, 0xd1, 0x63, 0x40, 0x0e, 0x35, + 0xcc, 0xa1, 0xa5, 0x89, 0xf0, 0x93, 0x01, 0x01, 0xc2, 0xbe, 0x77, 0xc6, 0xa3, 0x2a, 0xc2, 0x13, + 0xd8, 0xd3, 0x51, 0xf5, 0x6b, 0x93, 0x50, 0x11, 0x1e, 0x29, 0xb2, 0xd0, 0x4f, 0x60, 0xb5, 0x1f, + 0x6b, 0x44, 0xac, 0xbc, 0x24, 0x32, 0xe4, 0xc3, 0xec, 0x39, 0x19, 0xef, 0x64, 0x61, 0xcf, 0x8d, + 0xc3, 0x19, 0x4e, 0x6a, 0x52, 0xff, 0xa2, 0xc0, 0x8d, 0x44, 0x0d, 0xf1, 0xd2, 0x75, 0xe0, 0x69, + 0x40, 0x8f, 0x21, 0xcf, 0xa3, 0x42, 0x27, 0x2e, 0x91, 0x2d, 0xea, 0x9d, 0x6c, 0x31, 0xe4, 0x05, + 0xcc, 0x2e, 0x75, 0x49, 0xd8, 0x22, 0x43, 0x18, 0x0e, 0xa4, 0xa2, 0x1f, 0x40, 0x5e, 0x6a, 0x66, + 0xe5, 0x59, 0x61, 0xf8, 0xb7, 0xce, 0x61, 0x78, 0xfc, 0xec, 0x8d, 0x39, 0xae, 0x0a, 0x07, 0x02, + 0xd5, 0x7f, 0x28, 0xb0, 0xf1, 0x22, 0xfb, 0x1e, 0x1a, 0xcc, 0x45, 0x9f, 0x4f, 0xd8, 0x58, 0xcb, + 0x98, 0x27, 0x06, 0xf3, 0x2c, 0x0c, 0x66, 0x12, 0x1f, 0x12, 0xb1, 0xaf, 0x0b, 0xf3, 0x86, 0x4b, + 0xfb, 0xbe, 0x71, 0x77, 0x2f, 0x6c, 0x5c, 0xec, 0xe0, 0x61, 0x19, 0xbc, 0xcf, 0x85, 0x63, 0x4f, + 0x87, 0xfa, 0x5c, 0x81, 0xf5, 0x29, 0x9d, 0x0a, 0x7d, 0x10, 0xf6, 0x62, 0x51, 0x44, 0xca, 0x8a, + 0xc8, 0x8b, 0x52, 0xb4, 0x89, 0x0a, 0x04, 0x8e, 0xd3, 0xa1, 0x5f, 0x28, 0x80, 0x9c, 0x09, 0x79, + 0xb2, 0x73, 0x5c, 0xb8, 0x8e, 0x5f, 0x97, 0x06, 0xa0, 0x49, 0x1c, 0x4e, 0x51, 0xa7, 0x12, 0x28, + 0xec, 0x13, 0x87, 0xf4, 0x3f, 0x31, 0x4c, 0x9d, 0x4f, 0x62, 0xc4, 0x36, 0x64, 0x96, 0xca, 0x6e, + 0x17, 0x84, 0xd9, 0xf6, 0xfe, 0x7d, 0x89, 0xc1, 0x11, 0x2a, 0xde, 0x1b, 0xbb, 0x86, 0xa9, 0xcb, + 0xb9, 0x2d, 0xe8, 0x8d, 0x5c, 0x1e, 0x16, 0x18, 0xf5, 0x77, 0xb3, 0x90, 0x17, 0x3a, 0xf8, 0x2c, + 0x79, 0x76, 0x2b, 0xad, 0x43, 0x21, 0x28, 0xbd, 0x52, 0x6a, 0x49, 0x92, 0x15, 0x82, 0x32, 0x8d, + 0x43, 0x1a, 0xf4, 0x05, 0xe4, 0x99, 0x5f, 0x90, 0x73, 0x17, 0x2f, 0xc8, 0x4b, 0x3c, 0xd2, 0x82, + 0x52, 0x1c, 0x88, 0x44, 0x2e, 0xac, 0xdb, 0xfc, 0xf4, 0xd4, 0xa5, 0xce, 0x9e, 0xe5, 0xde, 0xb5, + 0x06, 0xa6, 0xbe, 0xad, 0x71, 0xef, 0xc9, 0x6e, 0x78, 0x9b, 0x97, 0xc0, 0xfd, 0x74, 0x92, 0xd3, + 0x51, 0xf5, 0xcd, 0x29, 0x28, 0x51, 0xba, 0xa6, 0x89, 0x56, 0x7f, 0xab, 0xc0, 0xda, 0x01, 0x75, + 0x86, 0x86, 0x46, 0x31, 0x6d, 0x51, 0x87, 0x9a, 0x5a, 0xc2, 0x35, 0x4a, 0x06, 0xd7, 0xf8, 0xde, + 0x9e, 0x9d, 0xea, 0xed, 0x1b, 0x30, 0x67, 0x13, 0xb7, 0x23, 0x07, 0xfb, 0x3c, 0xc7, 0xee, 0x13, + 
0xb7, 0x83, 0x05, 0x54, 0x60, 0x2d, 0xc7, 0x15, 0x86, 0xce, 0x4b, 0xac, 0xe5, 0xb8, 0x58, 0x40, + 0xd5, 0x5f, 0x2b, 0xb0, 0xc4, 0xad, 0xd8, 0xe9, 0x50, 0xad, 0xcb, 0x9f, 0x15, 0x5f, 0x2a, 0x80, + 0x68, 0xf2, 0xb1, 0xe1, 0x65, 0x44, 0x71, 0xeb, 0xa3, 0xec, 0x29, 0x3a, 0xf1, 0x60, 0x09, 0xc3, + 0x7a, 0x02, 0xc5, 0x70, 0x8a, 0x4a, 0xf5, 0xcf, 0xb3, 0xf0, 0xc6, 0x21, 0xe9, 0x19, 0xba, 0x48, + 0xf5, 0xa0, 0x3f, 0xc9, 0xe6, 0xf0, 0xf2, 0xcb, 0xaf, 0x01, 0x73, 0xcc, 0xa6, 0x9a, 0xcc, 0xe6, + 0x7b, 0xd9, 0x4d, 0x9f, 0x7a, 0xe8, 0x03, 0x9b, 0x6a, 0xe1, 0x0d, 0xf2, 0x2f, 0x2c, 0x54, 0xa0, + 0x1f, 0xc1, 0x02, 0x73, 0x89, 0x3b, 0x60, 0x32, 0xf8, 0xef, 0x5f, 0x86, 0x32, 0x21, 0xb0, 0xb1, + 0x22, 0xd5, 0x2d, 0x78, 0xdf, 0x58, 0x2a, 0x52, 0xff, 0xad, 0xc0, 0xc6, 0x54, 0xde, 0x86, 0x61, + 0xea, 0x3c, 0x18, 0x5e, 0xbe, 0x93, 0xed, 0x98, 0x93, 0xf7, 0x2e, 0xc1, 0x6e, 0x79, 0xf6, 0x69, + 0xbe, 0x56, 0xff, 0xa5, 0xc0, 0xdb, 0x67, 0x31, 0x5f, 0x41, 0xf3, 0xb3, 0xe2, 0xcd, 0xef, 0xc1, + 0xe5, 0x59, 0x3e, 0xa5, 0x01, 0x7e, 0x99, 0x3b, 0xdb, 0x6e, 0xee, 0x26, 0xde, 0x41, 0x6c, 0x01, + 0xdc, 0x0b, 0x8b, 0x7c, 0x70, 0x89, 0xfb, 0x01, 0x06, 0x47, 0xa8, 0xb8, 0xaf, 0x6c, 0xd9, 0x1e, + 0xe4, 0x55, 0x6e, 0x65, 0x37, 0xc8, 0x6f, 0x2c, 0x5e, 0xf9, 0xf6, 0xbf, 0x70, 0x20, 0x11, 0xb9, + 0xb0, 0xd2, 0x8f, 0x2d, 0x0a, 0x64, 0x9a, 0x9c, 0x77, 0x0e, 0x0c, 0xf8, 0xbd, 0xb9, 0x39, 0x0e, + 0xc3, 0x09, 0x1d, 0xe8, 0x08, 0x4a, 0x43, 0xe9, 0x2f, 0xcb, 0xf4, 0x4a, 0xba, 0xf7, 0x3a, 0x2e, + 0x34, 0x6e, 0xf2, 0xf7, 0xc6, 0x61, 0x12, 0x79, 0x3a, 0xaa, 0xae, 0x25, 0x81, 0x78, 0x52, 0x86, + 0xfa, 0x77, 0x05, 0xde, 0x9a, 0x7a, 0x13, 0x57, 0x10, 0x7a, 0x9d, 0x78, 0xe8, 0xed, 0x5c, 0x46, + 0xe8, 0xa5, 0xc7, 0xdc, 0x6f, 0x16, 0x5e, 0x60, 0xa9, 0x08, 0xb6, 0xc7, 0x50, 0xb0, 0xfd, 0xd9, + 0x25, 0x65, 0xd3, 0x93, 0x25, 0x72, 0x38, 0x6b, 0x63, 0x99, 0xf7, 0xcf, 0xe0, 0x13, 0x87, 0x42, + 0xd1, 0x8f, 0x61, 0xcd, 0x9f, 0xed, 0x39, 0xbf, 0x61, 0xba, 0xfe, 0x80, 0x76, 0xf1, 0xf0, 0xb9, + 0x36, 0x1e, 0x55, 0xd7, 0x76, 0x13, 0x52, 0xf1, 0x84, 0x1e, 0xd4, 0x85, 0x62, 0x78, 0xfd, 0xfe, + 0xfb, 0xfe, 0xbd, 0xf3, 0xfb, 0xdb, 0x32, 0x1b, 0xaf, 0x49, 0x07, 0x17, 0x43, 0x18, 0xc3, 0x51, + 0xe9, 0x97, 0xfc, 0xd0, 0xff, 0x19, 0xac, 0x91, 0xf8, 0xa2, 0x93, 0x95, 0xe7, 0xcf, 0xfb, 0x08, + 0x49, 0xac, 0x4a, 0x1b, 0x65, 0x69, 0xc4, 0x5a, 0x02, 0xc1, 0xf0, 0x84, 0xb2, 0xb4, 0xd7, 0xdf, + 0xc2, 0x55, 0xbd, 0xfe, 0x90, 0x06, 0x85, 0x21, 0x71, 0x0c, 0xd2, 0xec, 0x51, 0xfe, 0xd4, 0xce, + 0x9d, 0xaf, 0xa0, 0x1d, 0x4a, 0xd6, 0x70, 0xb2, 0xf3, 0x21, 0x0c, 0x87, 0x72, 0xd5, 0x3f, 0xcc, + 0x42, 0xf5, 0x8c, 0xf6, 0x8d, 0x1e, 0x00, 0xb2, 0x9a, 0x8c, 0x3a, 0x43, 0xaa, 0xdf, 0xf3, 0x56, + 0xd1, 0xfe, 0x58, 0x9f, 0x0b, 0x07, 0xaa, 0x47, 0x13, 0x14, 0x38, 0x85, 0x0b, 0xf5, 0x60, 0xc9, + 0x8d, 0x8c, 0x7a, 0x32, 0x0b, 0xde, 0xcf, 0x6e, 0x57, 0x74, 0x50, 0x6c, 0xac, 0x8d, 0x47, 0xd5, + 0xd8, 0xe8, 0x88, 0x63, 0xd2, 0x91, 0x06, 0xa0, 0x85, 0x57, 0xe7, 0x85, 0x7e, 0x3d, 0x5b, 0x15, + 0x0b, 0x6f, 0x2c, 0xe8, 0x3b, 0x91, 0xcb, 0x8a, 0x88, 0x55, 0x4f, 0x16, 0xa1, 0x14, 0xba, 0xf0, + 0xd5, 0xae, 0xef, 0xd5, 0xae, 0xef, 0x85, 0xbb, 0x3e, 0x78, 0xb5, 0xeb, 0xbb, 0xd0, 0xae, 0x2f, + 0xa5, 0x16, 0x17, 0xaf, 0x6c, 0x13, 0x77, 0xa2, 0x40, 0x65, 0x22, 0xc7, 0xaf, 0x7a, 0x17, 0xf7, + 0xc5, 0xc4, 0x2e, 0xee, 0xa3, 0x8b, 0x8c, 0x4d, 0xd3, 0xb6, 0x71, 0xff, 0x54, 0x40, 0x7d, 0xb1, + 0x8d, 0x57, 0x30, 0x17, 0xf6, 0xe3, 0x73, 0xe1, 0x77, 0xff, 0x07, 0x03, 0xb3, 0x6c, 0xe4, 0xfe, + 0xa3, 0x00, 0x84, 0xc3, 0x0c, 0x7a, 0x1b, 0x22, 0x3f, 0x14, 0xca, 0xd2, 0xed, 0xb9, 0x29, 0x02, + 0x47, 0x37, 0x61, 0xb1, 
0x4f, 0x19, 0x23, 0x6d, 0x7f, 0x21, 0x12, 0xfc, 0x8e, 0xb9, 0xeb, 0x81, + 0xb1, 0x8f, 0x47, 0x47, 0xb0, 0xe0, 0x50, 0xc2, 0x2c, 0x53, 0x2e, 0x46, 0xbe, 0xc3, 0x5f, 0xc1, + 0x58, 0x40, 0x4e, 0x47, 0xd5, 0x5b, 0x59, 0x7e, 0x67, 0xae, 0xc9, 0x47, 0xb3, 0x60, 0xc2, 0x52, + 0x1c, 0xba, 0x07, 0x25, 0xa9, 0x23, 0x72, 0x60, 0xaf, 0xd2, 0xbe, 0x21, 0x4f, 0x53, 0xda, 0x4d, + 0x12, 0xe0, 0x49, 0x1e, 0xf5, 0x01, 0xe4, 0xfd, 0xc1, 0x00, 0x95, 0x61, 0x2e, 0xf2, 0xde, 0xf2, + 0x0c, 0x17, 0x90, 0x84, 0x63, 0x66, 0xd3, 0x1d, 0xa3, 0xfe, 0x5e, 0x81, 0xd7, 0x52, 0x9a, 0x12, + 0x7a, 0x03, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0xb0, 0x38, 0x1e, 0x55, 0x73, 0x9f, 0xe1, 0x87, 0x98, + 0xc3, 0x10, 0x81, 0x45, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xce, 0x7e, 0xe3, 0xc9, 0xbd, 0x56, + 0xa3, 0xc8, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x26, 0xe4, 0x35, 0xd2, 0x18, 0x98, 0x7a, 0xcf, + 0xbb, 0xaf, 0x25, 0xef, 0x8d, 0xb7, 0xb3, 0xed, 0xc1, 0x70, 0x80, 0x6d, 0xec, 0x3d, 0x3b, 0xa9, + 0xcc, 0x7c, 0x75, 0x52, 0x99, 0x79, 0x7e, 0x52, 0x99, 0xf9, 0xf9, 0xb8, 0xa2, 0x3c, 0x1b, 0x57, + 0x94, 0xaf, 0xc6, 0x15, 0xe5, 0xf9, 0xb8, 0xa2, 0xfc, 0x75, 0x5c, 0x51, 0x7e, 0xf9, 0xb7, 0xca, + 0xcc, 0xf7, 0x37, 0xb3, 0xfe, 0x97, 0xc3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x71, 0x54, 0x54, + 0xe6, 0x29, 0x21, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -435,6 +1107,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -627,7 +1313,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *Rule) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -637,46 +1323,31 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Scope != nil { - i -= len(*m.Scope) - copy(dAtA[i:], *m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i-- - dAtA[i] = 0x22 - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) 
- 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.APIVersions) > 0 { - for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIVersions[iNdEx]) - copy(dAtA[i:], m.APIVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) - i-- - dAtA[i] = 0x12 + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) i-- dAtA[i] = 0xa } @@ -684,7 +1355,7 @@ func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *ParamKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -694,39 +1365,30 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0x12 - if len(m.Operations) > 0 { - for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Operations[iNdEx]) - copy(dAtA[i:], m.Operations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { +func (m *ParamRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -736,42 +1398,49 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 - } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 i -= len(m.Namespace) copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -781,79 +1450,65 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a - } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x20 } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -861,11 +1516,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -874,15 +1562,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -892,30 +1585,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -929,7 +1618,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -939,12 +1628,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -976,7 +1665,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -986,33 +1675,40 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if m.Service != nil { + if m.ParamRef != nil { { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1020,456 +1716,3443 @@ func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset 
int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if 
len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - return n -} - -func (m *RuleWithOperations) Size() (n int) { - if m == nil { - return 0 + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { 
+ return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } } if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], 
*m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - return n + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a } - return n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" - } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" - } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 } - repeatedStringForItems += "}" - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, - `}`, - }, "") - return s + return len(dAtA) - i, nil } -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + dAtA[offset] = uint8(v) + return base } -func (this *ServiceReference) String() string { - if this == nil { - return "nil" +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + 
`,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingWebhook) String() string { - if this == nil { - return "nil" + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s 
:= strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return n } -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n 
+= 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } 
+ if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) 
(n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + 
valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + 
repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + 
repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + 
`,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, v11.RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1492,17 +5175,17 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1512,27 +5195,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1559,13 +5243,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1592,47 +5276,63 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err 
:= m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1659,18 +5359,15 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1680,115 +5377,80 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s - iNdEx = postIndex - case 10: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1798,28 +5460,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1846,10 +5508,8 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1874,7 +5534,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1897,15 +5557,47 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1932,13 +5624,16 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1965,11 +5660,45 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks 
= append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1991,7 +5720,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2014,10 +5743,10 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2082,7 +5811,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2108,7 +5837,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2131,17 +5860,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2151,29 
+5880,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2183,29 +5916,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2215,27 +5952,29 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2263,64 +6002,48 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2330,27 +6053,29 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2377,7 +6102,8 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2402,7 +6128,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2425,17 +6151,17 @@ func (m 
*ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2445,29 +6171,16 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2477,62 +6190,33 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2542,12 +6226,26 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Port = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2692,7 +6390,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) + m.Rules = append(m.Rules, v11.RuleWithOperations{}) if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2768,9 +6466,247 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2780,50 +6716,30 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx 
= postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TimeoutSeconds = &v - case 8: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2833,29 +6749,81 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2865,28 +6833,28 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := 
MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2913,10 +6881,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2941,7 +6907,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *Validation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2964,17 +6930,17 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: Validation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2984,30 +6950,29 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3017,25 +6982,88 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - 
if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3058,7 +7086,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Variable) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3081,17 +7109,17 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: Variable: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3101,30 +7129,29 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3134,25 +7161,23 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 78440764f..1855cdfc4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.admissionregistration.v1beta1; +import "k8s.io/api/admissionregistration/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +29,180 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1beta1"; +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. 
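The generated Unmarshal hunks above all repeat the same gogo/protobuf wire-format pattern: decode a varint tag, split it into field number and wire type, then read either a varint value or a length-delimited payload bounded by postIndex. The following is a minimal, dependency-free Go sketch of that loop; readUvarint and the sample bytes are illustrative only and are not part of the generated code.

package main

import (
	"errors"
	"fmt"
)

// readUvarint mirrors the 7-bits-per-byte shift/OR loop that the generated
// Unmarshal methods inline for every field.
func readUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Field 1, wire type 2 (length-delimited), payload "hi":
	// tag = (1<<3)|2 = 0x0A, length = 2, then the payload bytes.
	data := []byte{0x0A, 0x02, 'h', 'i'}

	tag, i, _ := readUvarint(data, 0)
	fieldNum, wireType := tag>>3, tag&0x7
	length, i, _ := readUvarint(data, i)
	payload := data[i : i+int(length)]
	fmt.Printf("field=%d wiretype=%d payload=%q\n", fieldNum, wireType, payload)
}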
+ // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. 
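A small, self-contained Go sketch of the Exact/Equivalent distinction described above. The gvr struct and the equivalences table are hypothetical stand-ins for the API server's real resource-equivalence mapping; only the shape of the decision is meant to be accurate.

package main

import "fmt"

type gvr struct{ group, version, resource string }

// equivalences is a hypothetical table of group/version pairs that expose the
// same underlying resource (e.g. deployments via apps/v1, apps/v1beta1, extensions/v1beta1).
var equivalences = map[gvr]gvr{
	{"apps", "v1beta1", "deployments"}:       {"apps", "v1", "deployments"},
	{"extensions", "v1beta1", "deployments"}: {"apps", "v1", "deployments"},
}

// matches sketches the documented difference: "Exact" compares the request
// as-is against the rule, "Equivalent" first maps the request onto its
// preferred equivalent group/version before comparing.
func matches(rule, request gvr, matchPolicy string) bool {
	if request == rule {
		return true
	}
	if matchPolicy == "Equivalent" {
		if preferred, ok := equivalences[request]; ok {
			return preferred == rule
		}
	}
	return false
}

func main() {
	rule := gvr{"apps", "v1", "deployments"}
	req := gvr{"apps", "v1beta1", "deployments"}
	fmt.Println(matches(rule, req, "Exact"))      // false
	fmt.Println(matches(rule, req, "Equivalent")) // true
}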
+ // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -47,7 +222,7 @@ message MutatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; + repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -176,6 +351,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -203,61 +400,86 @@ message MutatingWebhookConfigurationList { repeated MutatingWebhookConfiguration items = 2; } -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". // Required. - repeated string apiGroups = 1; + optional string apiVersion = 1; - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. + // Kind is the API kind the resources belong to. 
// Required. - repeated string apiVersions = 2; + optional string kind = 2; +} - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // name is the name of the resource being referenced. // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + optional string name = 1; - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. // // +optional - optional string scope = 4; -} + optional string namespace = 2; -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * - // for all of those operations and any future admission operations that are added. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string operations = 1; + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. 
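The name/selector exclusivity and parameterNotFoundAction behaviour described here can be exercised directly with the v1beta1 Go types added by this dependency bump. A hedged sketch, assuming the bumped k8s.io/api module is on the module path; the resource names and labels are placeholders.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The two mutually exclusive ways to reference params: by name, or by
	// label selector (selector: {} would match every param of the ParamKind).
	deny := admissionregistrationv1beta1.DenyAction

	byName := admissionregistrationv1beta1.ParamRef{
		Name:                    "cluster-limits",
		ParameterNotFoundAction: &deny,
	}
	bySelector := admissionregistrationv1beta1.ParamRef{
		Selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"team": "platform"}},
		ParameterNotFoundAction: &deny,
	}
	fmt.Println(byName.Name, bySelector.Selector.MatchLabels)
}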
+ // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - optional Rule rule = 2; + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + optional string parameterNotFoundAction = 4; } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -282,6 +504,248 @@ message ServiceReference { optional int32 port = 4; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. 
+message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingSpec { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. 
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. 
+ // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. message ValidatingWebhook { // The name of the admission webhook. 
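The matchConditions ordering documented above (any FALSE skips, otherwise errors defer to failurePolicy, all TRUE evaluates) reduces to a small decision function. The sketch below is illustrative only; conditionResult and decide are hypothetical names, not apiserver code.

package main

import "fmt"

// conditionResult is the outcome of evaluating a single match condition.
type conditionResult int

const (
	condTrue conditionResult = iota
	condFalse
	condError
)

// decide mirrors the documented ordering: any FALSE -> skip; any error
// (with no FALSE) -> apply failurePolicy; all TRUE -> evaluate the policy/webhook.
func decide(results []conditionResult, failurePolicy string) string {
	sawError := false
	for _, r := range results {
		switch r {
		case condFalse:
			return "skip" // 1. ANY matchCondition FALSE: skipped
		case condError:
			sawError = true
		}
	}
	if sawError {
		if failurePolicy == "Fail" {
			return "reject request" // 3. error + failurePolicy=Fail
		}
		return "skip" // 3. error + failurePolicy=Ignore
	}
	return "evaluate" // 2. ALL matchConditions TRUE
}

func main() {
	fmt.Println(decide([]conditionResult{condTrue, condError}, "Fail"))  // reject request
	fmt.Println(decide([]conditionResult{condTrue, condFalse}, "Fail"))  // skip
	fmt.Println(decide([]conditionResult{condTrue, condTrue}, "Ignore")) // evaluate
}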
@@ -301,7 +765,7 @@ message ValidatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; + repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -412,6 +876,28 @@ message ValidatingWebhook { // Default to `['v1beta1']`. // +optional repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. @@ -439,6 +925,97 @@ message ValidatingWebhookConfigurationList { repeated ValidatingWebhookConfiguration items = 2; } +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. 
+ // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. 
If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + optional string Expression = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go index 098744cf6..363233a2f 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingWebhookConfigurationList{}, &MutatingWebhookConfiguration{}, &MutatingWebhookConfigurationList{}, + &ValidatingAdmissionPolicy{}, + &ValidatingAdmissionPolicyList{}, + &ValidatingAdmissionPolicyBinding{}, + &ValidatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 630ea1f57..c199702fb 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -17,63 +17,37 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended // to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. 
- // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional - Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` -} +type Rule = v1.Rule // ScopeType specifies a scope for a Rule. -type ScopeType string +type ScopeType = v1.ScopeType const ( // ClusterScope means that scope is limited to cluster-scoped objects. // Namespace objects are cluster-scoped. - ClusterScope ScopeType = "Cluster" + ClusterScope ScopeType = v1.ClusterScope // NamespacedScope means that scope is limited to namespaced objects. - NamespacedScope ScopeType = "Namespaced" + NamespacedScope ScopeType = v1.NamespacedScope // AllScopes means that all scopes are included. - AllScopes ScopeType = "*" + AllScopes ScopeType = v1.AllScopes +) + +// ParameterNotFoundActionType specifies a failure policy that defines how a binding +// is evaluated when the param referred by its perNamespaceParamRef is not found. +type ParameterNotFoundActionType string + +const ( + // Allow means all requests will be admitted if no param resources + // could be found. + AllowAction ParameterNotFoundActionType = "Allow" + // Deny means all requests will be denied if no param resources are found. + DenyAction ParameterNotFoundActionType = "Deny" ) // FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. @@ -113,6 +87,584 @@ const ( SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. 
+type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy. +type ValidatingAdmissionPolicyConditionType string + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. 
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. 
+ // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. 
[1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. 
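The sketch below is illustrative only and not part of the vendored sources: it shows how the ValidatingAdmissionPolicySpec, ParamKind, Validation, and Variable types defined in this hunk compose into a policy object in Go. The policy name, the ConfigMap-based paramKind, and the CEL expressions are assumptions chosen for demonstration.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failurePolicy := admissionregistrationv1beta1.Fail

	policy := admissionregistrationv1beta1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit.example.com"},
		Spec: admissionregistrationv1beta1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &failurePolicy,
			// Parameters are plain v1 ConfigMaps; bindings select which ones apply.
			ParamKind: &admissionregistrationv1beta1.ParamKind{
				APIVersion: "v1",
				Kind:       "ConfigMap",
			},
			// MatchConstraints selects apps/v1 deployments on CREATE and UPDATE.
			MatchConstraints: &admissionregistrationv1beta1.MatchResources{
				ResourceRules: []admissionregistrationv1beta1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1beta1.RuleWithOperations{
						Operations: []admissionregistrationv1beta1.OperationType{
							admissionregistrationv1beta1.Create,
							admissionregistrationv1beta1.Update,
						},
						Rule: admissionregistrationv1beta1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// A composited variable, available to later expressions as variables.replicas.
			Variables: []admissionregistrationv1beta1.Variable{{
				Name:       "replicas",
				Expression: "object.spec.replicas",
			}},
			// The validation reads the variable above and the bound ConfigMap params.
			Validations: []admissionregistrationv1beta1.Validation{{
				Expression: "variables.replicas <= int(params.data['maxReplicas'])",
				Message:    "replica count exceeds the configured maximum",
			}},
		},
	}

	fmt.Println("policy:", policy.Name)
}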
+ // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. 
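A minimal sketch of the AuditAnnotation type described above, not part of the vendored sources; the key and CEL valueExpression are assumptions. The annotation key recorded in the audit event would be prefixed with the policy name, e.g. "replica-limit.example.com/high-replica-count".

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	annotation := admissionregistrationv1beta1.AuditAnnotation{
		// Combined with the policy name to form "{policy name}/high-replica-count".
		Key: "high-replica-count",
		// Must evaluate to a string (annotation recorded) or null (annotation omitted).
		ValueExpression: "'replicas=' + string(object.spec.replicas)",
	}
	fmt.Println(annotation.Key, "->", annotation.ValueExpression)
}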
+ // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +type ParamRef struct { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + // + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. 
+ // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. 
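The following sketch, again illustrative rather than part of the vendored sources, binds the policy from the earlier example to ConfigMap params selected by label, using the ParamRef, ParameterNotFoundActionType, and ValidationAction values defined in this hunk; the names and labels are assumptions.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	paramNotFound := admissionregistrationv1beta1.DenyAction

	binding := admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit-binding.example.com"},
		Spec: admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec{
			// Must reference an existing ValidatingAdmissionPolicy by name.
			PolicyName: "replica-limit.example.com",
			ParamRef: &admissionregistrationv1beta1.ParamRef{
				// Select params by label; selector and name are mutually exclusive.
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"environment": "prod"},
				},
				// Deny requests when the selector matches no params.
				ParameterNotFoundAction: &paramNotFound,
			},
			// Deny and Audit may be combined; Deny and Warn may not.
			ValidationActions: []admissionregistrationv1beta1.ValidationAction{
				admissionregistrationv1beta1.Deny,
				admissionregistrationv1beta1.Audit,
			},
		},
	}

	fmt.Println("binding:", binding.Name, "->", binding.Spec.PolicyName)
}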
+ // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. 
+ RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -321,6 +873,28 @@ type ValidatingWebhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -471,6 +1045,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -488,27 +1084,19 @@ const ( // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. 
-type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * - // for all of those operations and any future admission operations that are added. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} +type RuleWithOperations = v1.RuleWithOperations // OperationType specifies an operation for a request. -type OperationType string +// +enum +type OperationType = v1.OperationType // The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" + OperationAll OperationType = v1.OperationAll + Create OperationType = v1.Create + Update OperationType = v1.Update + Delete OperationType = v1.Delete + Connect OperationType = v1.Connect ) // WebhookClientConfig contains the information to make a TLS @@ -577,3 +1165,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. 
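A short sketch, not part of the vendored sources, of the MatchCondition type introduced here; it can be placed in the matchConditions lists added to ValidatingWebhook, MutatingWebhook, and ValidatingAdmissionPolicySpec above. The condition name and CEL expression are assumptions.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Skip the webhook or policy for requests made by node identities; match
	// conditions are evaluated before the rest of the admission logic.
	cond := admissionregistrationv1beta1.MatchCondition{
		Name:       "exclude-node-requests",
		Expression: "!('system:nodes' in request.userInfo.groups)",
	}
	fmt.Println(cond.Name, "->", cond.Expression)
}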
+ Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index 314c3afae..adaf4bc11 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,52 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. 
A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. 
Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +83,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -66,25 +110,35 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", - "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", } -func (Rule) SwaggerDoc() map[string]string { - return map_Rule +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations } -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. 
It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. Required.", } -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. 
If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef } var map_ServiceReference = map[string]string{ @@ -99,6 +153,94 @@ func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. 
If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_ValidatingWebhook = map[string]string{ "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -111,6 +253,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { @@ -137,6 +280,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } +var map_Validation = map[string]string{ + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. 
No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. 
messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", +} + +func (Validation) SwaggerDoc() map[string]string { + return map_Validation +} + +var map_Variable = map[string]string{ + "": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", +} + +func (Variable) SwaggerDoc() map[string]string { + return map_Variable +} + var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index 23ddb1fa2..4c10b1d11 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -22,17 +22,111 @@ limitations under the License. package v1beta1 import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
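To make the Validation, Variable, and validationActions documentation above concrete, here is a small illustrative Go sketch (not part of the vendored patch) that wires the vendored v1beta1 types together; all names, labels, and CEL expressions are hypothetical, and typed action values are written as conversions rather than assuming exported constant names.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failurePolicy := admissionregistrationv1beta1.Fail
	reason := metav1.StatusReasonInvalid

	policy := admissionregistrationv1beta1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit.example.com"}, // hypothetical policy name
		Spec: admissionregistrationv1beta1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &failurePolicy,
			// params is only non-null if the policy declares a ParamKind and the
			// binding sets a matching paramRef (see the paramKind/paramRef docs above).
			ParamKind: &admissionregistrationv1beta1.ParamKind{APIVersion: "v1", Kind: "ConfigMap"},
			// Variables are evaluated lazily and exposed to later expressions as variables.<name>.
			Variables: []admissionregistrationv1beta1.Variable{
				{Name: "replicas", Expression: "object.spec.replicas"},
			},
			Validations: []admissionregistrationv1beta1.Validation{
				{
					Expression:        "variables.replicas <= params.max",
					Message:           "replica count exceeds the configured maximum",
					MessageExpression: `"replicas must be <= " + string(params.max)`,
					Reason:            &reason,
				},
			},
		},
	}

	binding := admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit-binding"},
		Spec: admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: policy.ObjectMeta.Name,
			// Deny and Audit may be combined; Deny and Warn may not (see the
			// validationActions documentation above).
			ValidationActions: []admissionregistrationv1beta1.ValidationAction{
				admissionregistrationv1beta1.ValidationAction("Deny"),
				admissionregistrationv1beta1.ValidationAction("Audit"),
			},
		},
	}

	fmt.Println(policy.ObjectMeta.Name, binding.ObjectMeta.Name)
}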
+func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Rules != nil { in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -77,6 +171,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -157,59 +256,65 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
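As a usage note (not part of the patch): the generated helpers above allocate fresh pointers, maps, and slices instead of aliasing the originals. A minimal sketch of what that buys callers, assuming only the vendored v1beta1 and apimachinery types:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &admissionregistrationv1beta1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"env": "prod"}, // illustrative label
		},
	}

	// DeepCopy allocates a new LabelSelector (and copies its label map), so
	// mutating the copy leaves the original untouched.
	cp := orig.DeepCopy()
	cp.NamespaceSelector.MatchLabels["env"] = "staging"

	fmt.Println(orig.NamespaceSelector.MatchLabels["env"]) // prod
	fmt.Println(cp.NamespaceSelector.MatchLabels["env"])   // staging
}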
-func (in *Rule) DeepCopyInto(out *Rule) { +func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames *out = make([]string, len(*in)) copy(*out, *in) } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Scope != nil { - in, out := &in.Scope, &out.Scope - *out = new(ScopeType) - **out = **in + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { if in == nil { return nil } - out := new(Rule) + out := new(ParamKind) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { +func (in *ParamRef) DeepCopyInto(out *ParamRef) { *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in } - in.Rule.DeepCopyInto(&out.Rule) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { if in == nil { return nil } - out := new(RuleWithOperations) + out := new(ParamRef) in.DeepCopyInto(out) return out } @@ -240,13 +345,267 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. 
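The ParamRef copying above covers the two ways a binding can locate its parameter object, by name or by label selector, with ParameterNotFoundAction deciding what happens when nothing matches. A brief hypothetical sketch (label values are illustrative, and the typed action is written as a conversion rather than assuming an exported constant name):

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	notFound := admissionregistrationv1beta1.ParameterNotFoundActionType("Deny")

	// Select params by label rather than by a single name; if no matching param
	// object exists, the request is treated according to ParameterNotFoundAction.
	ref := admissionregistrationv1beta1.ParamRef{
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"policy": "replica-limit"}, // illustrative
		},
		ParameterNotFoundAction: &notFound,
	}

	fmt.Printf("%+v\n", ref)
}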
+func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. +func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. 
+func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = *in in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Rules != nil { in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -286,6 +645,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -365,6 +729,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(v1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
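The status copying above mirrors how the API server reports type-checking results back on the policy object. A small sketch of reading them (not part of the patch; the ExpressionWarning field names FieldRef and Warning come from the upstream type definition, not from this hunk):

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// reportTypeCheckWarnings prints the type-checking results that the API server
// publishes in a policy's status (see ValidatingAdmissionPolicyStatus above).
func reportTypeCheckWarnings(p *admissionregistrationv1beta1.ValidatingAdmissionPolicy) {
	// TypeChecking stays nil until the server has finished checking the expressions.
	if p.Status.TypeChecking == nil {
		fmt.Println("type checking not complete yet")
		return
	}
	for _, w := range p.Status.TypeChecking.ExpressionWarnings {
		fmt.Printf("%s: %s\n", w.FieldRef, w.Warning)
	}
}

func main() {
	reportTypeCheckWarnings(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{})
}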
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index 09a92f476..c1be5122a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -73,6 +73,78 @@ func (in *MutatingWebhookConfigurationList) APILifecycleRemoved() (major, minor return 1, 22 } +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
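These prerelease-lifecycle methods are plain accessors, so callers can surface deprecation information directly; for the v1beta1 types added here they report introduction in 1.28, deprecation in 1.31, and removal in 1.34. A minimal usage sketch (not part of the patch):

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	p := &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}

	major, minor := p.APILifecycleIntroduced()
	depMajor, depMinor := p.APILifecycleDeprecated()
	remMajor, remMinor := p.APILifecycleRemoved()

	fmt.Printf("introduced in %d.%d, deprecated in %d.%d, removed in %d.%d\n",
		major, minor, depMajor, depMinor, remMajor, remMinor)
	// Output: introduced in 1.28, deprecated in 1.31, removed in 1.34
}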
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go new file mode 100644 index 000000000..e85da226e --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=apidiscovery.k8s.io + +package v2beta1 // import "k8s.io/api/apidiscovery/v2beta1" diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go new file mode 100644 index 000000000..ba6eee1b3 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go @@ -0,0 +1,1744 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto + +package v2beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
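The new apidiscovery.k8s.io/v2beta1 package added here carries the aggregated discovery format: groups contain versions, and versions contain the resources they serve. A short illustrative sketch of walking such a document, using field names that appear in the generated code below (the empty list is a stand-in for a real discovery response):

package main

import (
	"fmt"

	apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
)

// printDiscovery walks an aggregated discovery document: one entry per API
// group (the group name is carried in ObjectMeta.Name), each with its versions
// and the resources they serve.
func printDiscovery(list *apidiscoveryv2beta1.APIGroupDiscoveryList) {
	for _, group := range list.Items {
		for _, version := range group.Versions {
			for _, res := range version.Resources {
				fmt.Printf("%s/%s: %s (verbs: %v)\n",
					group.ObjectMeta.Name, version.Version, res.Resource, res.Verbs)
			}
		}
	}
}

func main() {
	printDiscovery(&apidiscoveryv2beta1.APIGroupDiscoveryList{})
}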
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} } +func (*APIGroupDiscovery) ProtoMessage() {} +func (*APIGroupDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_0442b7af4d680cb7, []int{0} +} +func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscovery.Merge(m, src) +} +func (m *APIGroupDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo + +func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} } +func (*APIGroupDiscoveryList) ProtoMessage() {} +func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) { + return fileDescriptor_0442b7af4d680cb7, []int{1} +} +func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src) +} +func (m *APIGroupDiscoveryList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo + +func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} } +func (*APIResourceDiscovery) ProtoMessage() {} +func (*APIResourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_0442b7af4d680cb7, []int{2} +} +func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceDiscovery.Merge(m, src) +} +func (m *APIResourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIResourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo + +func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} } +func (*APISubresourceDiscovery) ProtoMessage() {} +func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_0442b7af4d680cb7, []int{3} +} +func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APISubresourceDiscovery.Merge(m, src) +} +func (m *APISubresourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m 
*APISubresourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo + +func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} } +func (*APIVersionDiscovery) ProtoMessage() {} +func (*APIVersionDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_0442b7af4d680cb7, []int{4} +} +func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersionDiscovery.Merge(m, src) +} +func (m *APIVersionDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIVersionDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscovery") + proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscoveryList") + proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIResourceDiscovery") + proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APISubresourceDiscovery") + proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIVersionDiscovery") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_0442b7af4d680cb7) +} + +var fileDescriptor_0442b7af4d680cb7 = []byte{ + // 754 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xf7, 0x85, 0x01, 0xf4, 0x59, 0x2c, 0x6c, 0x94, 0x4d, + 0xa9, 0xd4, 0xda, 0x25, 0x02, 0xc4, 0x36, 0x29, 0xb4, 0x8d, 0xfa, 0x87, 0x26, 0x15, 0x95, 0xaa, + 0x2e, 0x6a, 0x3b, 0x17, 0xc7, 0x0d, 0xb1, 0xad, 0x99, 0x71, 0x24, 0x76, 0x7d, 0x84, 0xbe, 0x43, + 0x5f, 0x86, 0x55, 0xc5, 0xa2, 0x0b, 0xba, 0x89, 0x4a, 0xfa, 0x00, 0xdd, 0xb3, 0xaa, 0xec, 0x8c, + 0x7f, 0x42, 0x40, 0x44, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x3d, 0xe7, 0xdc, 0x7b, 0x2e, 0xd7, 0x06, + 0x3d, 0xeb, 0xef, 0x31, 0xcd, 0xf1, 0xf4, 0x7e, 0x60, 0x02, 0x75, 0x81, 0x03, 0xd3, 0x87, 0xe0, + 0x76, 0x3d, 0xaa, 0x8b, 0x82, 0xe1, 0x3b, 0xe1, 0x4f, 0xd7, 0x61, 0x96, 0x37, 0x04, 0x7a, 0xaa, + 0x0f, 0x1b, 0x26, 0x70, 0x63, 0x4b, 0xb7, 0xc1, 0x05, 0x6a, 0x70, 0xe8, 0x6a, 0x3e, 0xf5, 0xb8, + 0x87, 0xd5, 0x89, 0x40, 0x33, 0x7c, 0x47, 0xcb, 0x0a, 0x34, 0x21, 0x58, 0x7f, 0x68, 0x3b, 0xbc, + 0x17, 0x98, 0x9a, 0xe5, 0x0d, 0x74, 0xdb, 0xb3, 0x3d, 0x3d, 0xd2, 0x99, 0xc1, 0x71, 0xf4, 0x14, + 0x3d, 0x44, 0xbf, 0x4d, 0xfc, 0xd6, 0xb7, 0xd3, 0x01, 0x06, 0x86, 0xd5, 0x73, 0xdc, 0xb0, 0xb9, + 0xdf, 0xb7, 0x43, 0x80, 0xe9, 0x03, 0xe0, 0x86, 0x3e, 0x9c, 0x99, 0x62, 0x5d, 0xbf, 0x4d, 0x45, + 0x03, 0x97, 0x3b, 0x03, 0x98, 0x11, 0xec, 0xde, 0x25, 0x60, 0x56, 0x0f, 0x06, 0xc6, 0x75, 0x5d, + 0xfd, 0xbb, 0x84, 0x96, 0x9b, 0x87, 0xed, 0xa7, 0xd4, 0x0b, 0xfc, 0xfd, 0x38, 0x2b, 0xfe, 0x80, + 0x8a, 0xe1, 0x64, 0x5d, 0x83, 0x1b, 0xb2, 0xb4, 0x21, 0x6d, 0x96, 0x1b, 0x8f, 0xb4, 0x74, 0x2f, + 0x49, 0x03, 0xcd, 0xef, 0xdb, 0x21, 0xc0, 0xb4, 0x90, 0xad, 0x0d, 0xb7, 0xb4, 0xd7, 0xe6, 0x47, + 
0xb0, 0xf8, 0x4b, 0xe0, 0x46, 0x0b, 0x9f, 0x8d, 0xd4, 0xdc, 0x78, 0xa4, 0xa2, 0x14, 0x23, 0x89, + 0x2b, 0x36, 0x51, 0x71, 0x08, 0x94, 0x39, 0x9e, 0xcb, 0xe4, 0x85, 0x8d, 0xfc, 0x66, 0xb9, 0xb1, + 0xad, 0xdd, 0xb1, 0x79, 0xad, 0x79, 0xd8, 0x3e, 0x9a, 0x68, 0x92, 0x49, 0x5b, 0x35, 0xd1, 0xa5, + 0x28, 0x2a, 0x8c, 0x24, 0xbe, 0xf5, 0xaf, 0x12, 0x5a, 0x9b, 0xc9, 0xf6, 0xc2, 0x61, 0x1c, 0xbf, + 0x9f, 0xc9, 0xa7, 0xcd, 0x97, 0x2f, 0x54, 0x47, 0xe9, 0x92, 0xbe, 0x31, 0x92, 0xc9, 0xf6, 0x16, + 0x15, 0x1c, 0x0e, 0x83, 0x38, 0x58, 0x63, 0x9e, 0x60, 0xd3, 0x43, 0xb6, 0xaa, 0xc2, 0xbe, 0xd0, + 0x0e, 0x8d, 0xc8, 0xc4, 0xaf, 0xfe, 0x65, 0x11, 0xad, 0x36, 0x0f, 0xdb, 0x04, 0x98, 0x17, 0x50, + 0x0b, 0xd2, 0xbf, 0xd7, 0x03, 0x54, 0xa4, 0x02, 0x8c, 0xf2, 0x94, 0xd2, 0xf9, 0x62, 0x32, 0x49, + 0x18, 0xf8, 0x04, 0x55, 0x28, 0x30, 0xdf, 0x73, 0x19, 0x3c, 0x77, 0xdc, 0xae, 0xbc, 0x10, 0x6d, + 0x60, 0x77, 0xbe, 0x0d, 0x44, 0x83, 0x8a, 0x65, 0x87, 0xea, 0x56, 0x6d, 0x3c, 0x52, 0x2b, 0x24, + 0xe3, 0x47, 0xa6, 0xdc, 0xf1, 0x36, 0x2a, 0x30, 0xcb, 0xf3, 0x41, 0xce, 0x47, 0x83, 0x29, 0x71, + 0xb2, 0x4e, 0x08, 0x5e, 0x8d, 0xd4, 0x6a, 0x3c, 0x61, 0x04, 0x90, 0x09, 0x19, 0xef, 0xa3, 0x1a, + 0x73, 0x5c, 0x3b, 0x38, 0x31, 0x68, 0x5c, 0x97, 0x17, 0x23, 0x03, 0x59, 0x18, 0xd4, 0x3a, 0xd7, + 0xea, 0x64, 0x46, 0x81, 0x55, 0x54, 0x18, 0x02, 0x35, 0x99, 0x5c, 0xd8, 0xc8, 0x6f, 0x96, 0x5a, + 0xa5, 0xb0, 0xef, 0x51, 0x08, 0x90, 0x09, 0x8e, 0x35, 0x84, 0x58, 0xcf, 0xa3, 0xfc, 0x95, 0x31, + 0x00, 0x26, 0xff, 0x13, 0xb1, 0xfe, 0x0d, 0x8f, 0xb6, 0x93, 0xa0, 0x24, 0xc3, 0x08, 0xf9, 0x96, + 0xc1, 0xc1, 0xf6, 0xa8, 0x03, 0x4c, 0x5e, 0x4a, 0xf9, 0x8f, 0x13, 0x94, 0x64, 0x18, 0x98, 0xa2, + 0x0a, 0x0b, 0xcc, 0x78, 0xf3, 0x4c, 0x2e, 0x46, 0x17, 0xb1, 0x37, 0xcf, 0x45, 0x74, 0x52, 0x5d, + 0x7a, 0x17, 0xab, 0x22, 0x7c, 0x25, 0x53, 0x65, 0x64, 0xaa, 0x47, 0xfd, 0xdb, 0x02, 0xfa, 0xff, + 0x16, 0x3d, 0xde, 0x41, 0xe5, 0x0c, 0x57, 0xdc, 0xca, 0x8a, 0x30, 0x2d, 0x67, 0x24, 0x24, 0xcb, + 0xfb, 0xcb, 0x17, 0xc3, 0x50, 0xd5, 0xb0, 0x2c, 0xf0, 0x39, 0x74, 0xdf, 0x9c, 0xfa, 0xc0, 0xe4, + 0x7c, 0xb4, 0xb5, 0x3f, 0x6d, 0xb7, 0x26, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x32, 0xdd, 0x23, 0x3d, + 0x95, 0xc5, 0x9b, 0x4f, 0xa5, 0xfe, 0x4b, 0x42, 0x2b, 0x37, 0x7c, 0x81, 0xf0, 0x7d, 0xb4, 0x24, + 0xbe, 0x38, 0x62, 0x9d, 0xff, 0x89, 0x7e, 0x4b, 0x82, 0x4a, 0xe2, 0x3a, 0x3e, 0x46, 0xa5, 0xf4, + 0x14, 0x26, 0x1f, 0x87, 0x9d, 0x79, 0x4e, 0x61, 0xe6, 0x85, 0x6f, 0x2d, 0x8b, 0x1e, 0x25, 0x92, + 0x1c, 0x41, 0x6a, 0x8d, 0x0f, 0x50, 0xe9, 0x98, 0x02, 0xeb, 0xb9, 0xc0, 0x98, 0x78, 0xed, 0xee, + 0xc5, 0x82, 0x27, 0x71, 0xe1, 0x6a, 0xa4, 0xe2, 0xc4, 0x30, 0x41, 0x49, 0xaa, 0x6c, 0x1d, 0x9c, + 0x5d, 0x2a, 0xb9, 0xf3, 0x4b, 0x25, 0x77, 0x71, 0xa9, 0xe4, 0x3e, 0x8d, 0x15, 0xe9, 0x6c, 0xac, + 0x48, 0xe7, 0x63, 0x45, 0xba, 0x18, 0x2b, 0xd2, 0x8f, 0xb1, 0x22, 0x7d, 0xfe, 0xa9, 0xe4, 0xde, + 0xa9, 0x77, 0xfc, 0x87, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x66, 0x3b, 0x84, 0x9c, 0x07, + 0x00, 0x00, +} + +func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIGroupDiscoveryList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscoveryList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscoveryList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIResourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Subresources) > 0 { + for iNdEx := len(m.Subresources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subresources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SingularResource) + copy(dAtA[i:], m.SingularResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularResource))) + i-- + dAtA[i] = 0x22 + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x1a + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APISubresourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APISubresourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APISubresourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.AcceptedTypes) > 0 { + for iNdEx := len(m.AcceptedTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AcceptedTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIVersionDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersionDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersionDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Freshness) + copy(dAtA[i:], m.Freshness) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Freshness))) + i-- + dAtA[i] = 0x1a + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIGroupDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupDiscoveryList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceDiscovery) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SingularResource) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Subresources) > 0 { + for _, e := range m.Subresources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APISubresourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AcceptedTypes) > 0 { + for _, e := range m.AcceptedTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersionDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Freshness) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroupDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]APIVersionDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "APIVersionDiscovery", "APIVersionDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&APIGroupDiscovery{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupDiscoveryList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIGroupDiscovery{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIGroupDiscovery", "APIGroupDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIGroupDiscoveryList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubresources := "[]APISubresourceDiscovery{" + for _, f := range this.Subresources { + repeatedStringForSubresources += strings.Replace(strings.Replace(f.String(), "APISubresourceDiscovery", "APISubresourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForSubresources += "}" + s := 
strings.Join([]string{`&APIResourceDiscovery{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `SingularResource:` + fmt.Sprintf("%v", this.SingularResource) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Subresources:` + repeatedStringForSubresources + `,`, + `}`, + }, "") + return s +} +func (this *APISubresourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForAcceptedTypes := "[]GroupVersionKind{" + for _, f := range this.AcceptedTypes { + repeatedStringForAcceptedTypes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAcceptedTypes += "}" + s := strings.Join([]string{`&APISubresourceDiscovery{`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `AcceptedTypes:` + repeatedStringForAcceptedTypes + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `}`, + }, "") + return s +} +func (this *APIVersionDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]APIResourceDiscovery{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "APIResourceDiscovery", "APIResourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&APIVersionDiscovery{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Freshness:` + fmt.Sprintf("%v", this.Freshness) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroupDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersionDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupDiscoveryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscoveryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscoveryList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIGroupDiscovery{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceDiscovery) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresources = append(m.Subresources, APISubresourceDiscovery{}) + if err := m.Subresources[len(m.Subresources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APISubresourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APISubresourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APISubresourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AcceptedTypes = append(m.AcceptedTypes, v1.GroupVersionKind{}) + if err := m.AcceptedTypes[len(m.AcceptedTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersionDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersionDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersionDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, APIResourceDiscovery{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Freshness", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Freshness = DiscoveryFreshness(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto new file mode 100644 index 000000000..a09af750b --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -0,0 +1,156 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apidiscovery.v2beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/apidiscovery/v2beta1"; + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +message APIGroupDiscovery { + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + repeated APIVersionDiscovery versions = 2; +} + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +message APIGroupDiscoveryList { + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of groups for discovery. The groups are listed in priority order. + repeated APIGroupDiscovery items = 2; +} + +// APIResourceDiscovery provides information about an API resource for discovery. +message APIResourceDiscovery { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + optional string resource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // scope indicates the scope of a resource, either Cluster or Namespaced + optional string scope = 3; + + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + optional string singularResource = 4; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + repeated string verbs = 5; + + // shortNames is a list of suggested short names of the resource. + // +listType=set + repeated string shortNames = 6; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + repeated string categories = 7; + + // subresources is a list of subresources provided by this resource. 
Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + repeated APISubresourceDiscovery subresources = 8; +} + +// APISubresourceDiscovery provides information about an API subresource for discovery. +message APISubresourceDiscovery { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + optional string subresource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + repeated string verbs = 4; +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +message APIVersionDiscovery { + // version is the name of the version within a group version. + optional string version = 1; + + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + repeated APIResourceDiscovery resources = 2; + + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + optional string freshness = 3; +} + diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/register.go b/vendor/k8s.io/api/apidiscovery/v2beta1/register.go new file mode 100644 index 000000000..f7cef1081 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "apidiscovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &APIGroupDiscoveryList{}, + &APIGroupDiscovery{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/types.go b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go new file mode 100644 index 000000000..834293773 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go @@ -0,0 +1,163 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.32 +// +k8s:prerelease-lifecycle-gen:removed=1.35 +// The deprecate and remove versions stated above are rough estimates and may be subject to change. We are estimating v2 types will be available in 1.28 and will support 4 versions where both v2beta1 and v2 are supported before deprecation. + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +type APIGroupDiscoveryList struct { + v1.TypeMeta `json:",inline"` + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of groups for discovery. The groups are listed in priority order. 
+ Items []APIGroupDiscovery `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.32 +// +k8s:prerelease-lifecycle-gen:removed=1.35 +// The deprecate and remove versions stated above are rough estimates and may be subject to change. We are estimating v2 types will be available in 1.28 and will support 4 versions where both v2beta1 and v2 are supported before deprecation. + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +type APIGroupDiscovery struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + Versions []APIVersionDiscovery `json:"versions,omitempty" protobuf:"bytes,2,rep,name=versions"` +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +type APIVersionDiscovery struct { + // version is the name of the version within a group version. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + Resources []APIResourceDiscovery `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + Freshness DiscoveryFreshness `json:"freshness,omitempty" protobuf:"bytes,3,opt,name=freshness"` +} + +// APIResourceDiscovery provides information about an API resource for discovery. +type APIResourceDiscovery struct { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. 
+ // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // scope indicates the scope of a resource, either Cluster or Namespaced + Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"` + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + SingularResource string `json:"singularResource" protobuf:"bytes,4,opt,name=singularResource"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,5,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + // +listType=set + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,6,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // subresources is a list of subresources provided by this resource. Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + Subresources []APISubresourceDiscovery `json:"subresources,omitempty" protobuf:"bytes,8,rep,name=subresources"` +} + +// ResourceScope is an enum defining the different scopes available to a resource. +type ResourceScope string + +const ( + ScopeCluster ResourceScope = "Cluster" + ScopeNamespace ResourceScope = "Namespaced" +) + +// DiscoveryFreshness is an enum defining whether the Discovery document published by an apiservice is up to date (fresh). +type DiscoveryFreshness string + +const ( + DiscoveryFreshnessCurrent DiscoveryFreshness = "Current" + DiscoveryFreshnessStale DiscoveryFreshness = "Stale" +) + +// APISubresourceDiscovery provides information about an API subresource for discovery. +type APISubresourceDiscovery struct { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // acceptedTypes describes the kinds that this endpoint accepts. 
+ // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + AcceptedTypes []v1.GroupVersionKind `json:"acceptedTypes,omitempty" protobuf:"bytes,3,rep,name=acceptedTypes"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cf8f98c6f --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,190 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscovery) DeepCopyInto(out *APIGroupDiscovery) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersionDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscovery. +func (in *APIGroupDiscovery) DeepCopy() *APIGroupDiscovery { + if in == nil { + return nil + } + out := new(APIGroupDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscovery) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscoveryList) DeepCopyInto(out *APIGroupDiscoveryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIGroupDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscoveryList. 
+func (in *APIGroupDiscoveryList) DeepCopy() *APIGroupDiscoveryList { + if in == nil { + return nil + } + out := new(APIGroupDiscoveryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscoveryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceDiscovery) DeepCopyInto(out *APIResourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = make([]APISubresourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceDiscovery. +func (in *APIResourceDiscovery) DeepCopy() *APIResourceDiscovery { + if in == nil { + return nil + } + out := new(APIResourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISubresourceDiscovery) DeepCopyInto(out *APISubresourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.AcceptedTypes != nil { + in, out := &in.AcceptedTypes, &out.AcceptedTypes + *out = make([]v1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISubresourceDiscovery. +func (in *APISubresourceDiscovery) DeepCopy() *APISubresourceDiscovery { + if in == nil { + return nil + } + out := new(APISubresourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersionDiscovery) DeepCopyInto(out *APIVersionDiscovery) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionDiscovery. 
+func (in *APIVersionDiscovery) DeepCopy() *APIVersionDiscovery { + if in == nil { + return nil + } + out := new(APIVersionDiscovery) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..aa8f15fb4 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIGroupDiscovery) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *APIGroupDiscovery) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIGroupDiscoveryList) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *APIGroupDiscoveryList) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go index 4effbc6c1..6871da414 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -225,55 +225,57 @@ func init() { } var fileDescriptor_a3903ff5e3cc7a03 = []byte{ - // 768 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdf, 0x4e, 0x13, 0x4d, - 0x14, 0xef, 0xd2, 0x52, 0x60, 0xfa, 0x7d, 0xf4, 0x63, 0x3e, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0x82, - 0x41, 0x8d, 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x05, 0x31, 0x03, 0xf1, 0x02, - 0xbd, 0x70, 0xba, 0x3b, 0x6e, 0xd7, 0x76, 0x77, 0x36, 0x3b, 0xd3, 0x26, 0xdc, 0x18, 0x1f, 0xc1, - 0x07, 0xf1, 0xd2, 0x87, 0xe0, 0xca, 0x70, 0x63, 0x42, 0x62, 0xd2, 0xc8, 0xfa, 0x16, 0x5c, 0x99, - 0x99, 0xdd, 0xb6, 0x6c, 0xbb, 0xc4, 0x86, 0x8b, 0x26, 0x9d, 0x73, 0xce, 0xef, 0x77, 0xfe, 0xcc, - 0x6f, 0xce, 0x82, 0x57, 0xcd, 0xc7, 0x4c, 0x73, 0xa8, 0xde, 0x6c, 0xd7, 0x49, 0xe0, 0x11, 0x4e, - 0x98, 0xde, 0x21, 0x9e, 0x45, 0x03, 0x3d, 0x76, 0x60, 0xdf, 0x11, 0x3f, 0x46, 0x82, 0x0e, 0x09, - 0x1c, 0x8f, 0x93, 0xc0, 0xc3, 0x2d, 0xbd, 0xb3, 0x8a, 0x5b, 0x7e, 0x03, 0xaf, 0xea, 0x36, 0xf1, - 0x48, 0x80, 0x39, 0xb1, 0x34, 0x3f, 0xa0, 0x9c, 0xc2, 0xe5, 0x08, 0xa6, 0x61, 0xdf, 0xd1, 0x46, - 0x60, 0x5a, 0x0f, 0x56, 0xbe, 0x6f, 0x3b, 0xbc, 0xd1, 0xae, 0x6b, 0x26, 0x75, 0x75, 0x9b, 0xda, - 0x54, 0x97, 0xe8, 0x7a, 0xfb, 0x83, 0x3c, 0xc9, 0x83, 0xfc, 0x17, 0xb1, 0x96, 0x1f, 0x0e, 0x8a, - 0x71, 0xb1, 0xd9, 0x70, 0x3c, 0x12, 0x1c, 0xeb, 0x7e, 0xd3, 0x96, 0x95, 0xe9, 0x2e, 0xe1, 0x58, - 0xef, 0x8c, 0xd4, 0x52, 0xd6, 0xaf, 0x42, 0x05, 0x6d, 0x8f, 0x3b, 0x2e, 0x19, 0x01, 0x3c, 0xfa, - 0x1b, 0x80, 0x99, 0x0d, 0xe2, 0xe2, 0x61, 0x5c, 0xf5, 0x87, 0x02, 0xe6, 0x0f, 0x64, 0xa7, 0x07, - 0x9c, 0x06, 0xd8, 0x26, 0x6f, 0x48, 0xc0, 0x1c, 0xea, 0xc1, 0x35, 0x50, 0xc0, 0xbe, 0x13, 0xb9, - 0x76, 0xb6, 0x4a, 0xca, 0x92, 0xb2, 0x32, 0x63, 0xfc, 0x7f, 0xd2, 0xad, 0x64, 0xc2, 0x6e, 0xa5, - 0xb0, 0xf1, 0x7a, 0xa7, 0xe7, 0x42, 0x97, 0xe3, 0xe0, 0x06, 0x28, 0x12, 0xcf, 0xa4, 0x96, 0xe3, - 0xd9, 0x31, 0x53, 0x69, 0x42, 0x42, 0x17, 0x63, 0x68, 0x71, 0x3b, 0xe9, 0x46, 0xc3, 0xf1, 0x70, - 0x13, 0xcc, 0x59, 0xc4, 0xa4, 0x16, 0xae, 0xb7, 0x7a, 0xd5, 0xb0, 0x52, 0x76, 0x29, 0xbb, 0x32, - 0x63, 0x2c, 0x84, 0xdd, 0xca, 0xdc, 0xd6, 0xb0, 0x13, 0x8d, 0xc6, 0x57, 0xbf, 0x4d, 0x80, 0xd9, - 0xa1, 0x8e, 0xde, 0x83, 0x69, 0x31, 0x6e, 0x0b, 0x73, 0x2c, 0xdb, 0x29, 0xd4, 0x1e, 0x68, 0x83, - 0x2b, 0xef, 0x4f, 0x4d, 0xf3, 0x9b, 0xb6, 0xbc, 0x7f, 0x4d, 0x44, 0x6b, 0x9d, 0x55, 0x6d, 0xbf, - 0xfe, 0x91, 0x98, 0x7c, 0x8f, 0x70, 0x6c, 0xc0, 0xb8, 0x0b, 0x30, 0xb0, 0xa1, 0x3e, 0x2b, 0x7c, - 0x0b, 0x72, 0xcc, 0x27, 0xa6, 0xec, 0xb8, 0x50, 0x5b, 0xd7, 0xc6, 0x12, 0x94, 0x96, 0x2c, 0xf3, - 0xc0, 0x27, 0xa6, 0xf1, 0x4f, 0x9c, 0x26, 0x27, 0x4e, 0x48, 0x92, 0x42, 0x13, 0xe4, 0x19, 0xc7, - 0xbc, 0x2d, 0x66, 0x21, 0xe8, 0x9f, 0x5c, 0x8f, 0x5e, 0x52, 0x18, 0xb3, 0x71, 0x82, 0x7c, 0x74, - 0x46, 0x31, 0x75, 0xf5, 0x6b, 0x16, 0x2c, 0x26, 0x01, 0x9b, 0xd4, 0xb3, 0x1c, 0x2e, 0xe6, 0xf7, - 0x0c, 0xe4, 0xf8, 0xb1, 0x4f, 0x62, 0x29, 0xdc, 0xeb, 0x95, 0x78, 0x78, 0xec, 0x93, 0x8b, 0x6e, - 0xe5, 0xe6, 0x15, 0x30, 0xe1, 0x46, 0x12, 0x08, 0xd7, 0xfb, 0x1d, 0x44, 0x92, 0xb8, 0x95, 0x2c, - 0xe2, 0xa2, 0x5b, 0x29, 0xf6, 0x61, 0xc9, 0xba, 0xe0, 0x0b, 0x00, 0x69, 0x5d, 
0x76, 0x68, 0x3d, - 0x8f, 0x14, 0x2c, 0x94, 0x25, 0x06, 0x91, 0x35, 0xca, 0x31, 0x0d, 0xdc, 0x1f, 0x89, 0x40, 0x29, - 0x28, 0xd8, 0x01, 0xb0, 0x85, 0x19, 0x3f, 0x0c, 0xb0, 0xc7, 0xa2, 0x12, 0x1d, 0x97, 0x94, 0x72, - 0x72, 0xa8, 0x77, 0xc7, 0x53, 0x84, 0x40, 0x0c, 0xf2, 0xee, 0x8e, 0xb0, 0xa1, 0x94, 0x0c, 0xf0, - 0x36, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x26, 0x65, 0xfb, 0xfd, 0x3b, 0x40, 0xd2, 0x8a, 0x62, - 0x2f, 0xbc, 0x03, 0xa6, 0x5c, 0xc2, 0x18, 0xb6, 0x49, 0x29, 0x2f, 0x03, 0x8b, 0x71, 0xe0, 0xd4, - 0x5e, 0x64, 0x46, 0x3d, 0x7f, 0xf5, 0xbb, 0x02, 0x60, 0x72, 0xee, 0xbb, 0x0e, 0xe3, 0xf0, 0xdd, - 0x88, 0xd2, 0xb5, 0xf1, 0xfa, 0x12, 0x68, 0xa9, 0xf3, 0xff, 0xe2, 0x94, 0xd3, 0x3d, 0xcb, 0x25, - 0x95, 0x1f, 0x81, 0x49, 0x87, 0x13, 0x57, 0xdc, 0x62, 0x76, 0xa5, 0x50, 0x5b, 0xbb, 0x96, 0x0e, - 0x8d, 0x7f, 0xe3, 0x0c, 0x93, 0x3b, 0x82, 0x0b, 0x45, 0x94, 0xd5, 0xf9, 0xe1, 0x7e, 0xc4, 0x03, - 0xa8, 0xfe, 0x9c, 0x00, 0xf3, 0x69, 0x32, 0x86, 0x9f, 0x40, 0x91, 0x25, 0xec, 0xac, 0xa4, 0xc8, - 0xa2, 0xc6, 0x7e, 0x1c, 0x29, 0xab, 0x6f, 0xb0, 0xaa, 0x92, 0x76, 0x86, 0x86, 0x93, 0xc1, 0x7d, - 0xb0, 0x60, 0x52, 0xd7, 0xa5, 0xde, 0x76, 0xea, 0xce, 0xbb, 0x11, 0x76, 0x2b, 0x0b, 0x9b, 0x69, - 0x01, 0x28, 0x1d, 0x07, 0x03, 0x00, 0xcc, 0xde, 0x13, 0x88, 0x96, 0x5e, 0xa1, 0xf6, 0xf4, 0x5a, - 0x03, 0xee, 0xbf, 0xa4, 0xc1, 0xce, 0xea, 0x9b, 0x18, 0xba, 0x94, 0xc5, 0x78, 0x79, 0x72, 0xae, - 0x66, 0x4e, 0xcf, 0xd5, 0xcc, 0xd9, 0xb9, 0x9a, 0xf9, 0x1c, 0xaa, 0xca, 0x49, 0xa8, 0x2a, 0xa7, - 0xa1, 0xaa, 0x9c, 0x85, 0xaa, 0xf2, 0x2b, 0x54, 0x95, 0x2f, 0xbf, 0xd5, 0xcc, 0xd1, 0xf2, 0x58, - 0x1f, 0xd5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xd0, 0x65, 0xbc, 0x95, 0x07, 0x00, 0x00, + // 790 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0xdb, 0x48, + 0x14, 0x8e, 0x49, 0x08, 0x30, 0xd9, 0x4d, 0x96, 0x59, 0x10, 0xd9, 0xac, 0xe4, 0xb0, 0x91, 0x58, + 0xb1, 0xbb, 0x5a, 0x7b, 0x89, 0x96, 0xaa, 0xb4, 0x52, 0x2b, 0x0c, 0xa8, 0xa2, 0x85, 0x52, 0x4d, + 0x50, 0x0f, 0xb4, 0x87, 0x4e, 0xec, 0xa9, 0xe3, 0x26, 0xf6, 0x58, 0x9e, 0x49, 0x24, 0x2e, 0x55, + 0x7f, 0x42, 0xfb, 0x3f, 0x7a, 0xec, 0x8f, 0xe0, 0x54, 0x71, 0x44, 0xaa, 0x14, 0x15, 0xf7, 0x5f, + 0x70, 0xaa, 0x66, 0xec, 0x38, 0x38, 0x09, 0x6a, 0xc4, 0x21, 0x52, 0xe6, 0xbd, 0xf7, 0x7d, 0xef, + 0xcd, 0x37, 0xdf, 0x8c, 0xc1, 0xd3, 0xf6, 0x5d, 0xa6, 0x39, 0x54, 0x6f, 0x77, 0x9b, 0x24, 0xf0, + 0x08, 0x27, 0x4c, 0xef, 0x11, 0xcf, 0xa2, 0x81, 0x1e, 0x27, 0xb0, 0xef, 0x88, 0x1f, 0x23, 0x41, + 0x8f, 0x04, 0x8e, 0xc7, 0x49, 0xe0, 0xe1, 0x8e, 0xde, 0xdb, 0xc0, 0x1d, 0xbf, 0x85, 0x37, 0x74, + 0x9b, 0x78, 0x24, 0xc0, 0x9c, 0x58, 0x9a, 0x1f, 0x50, 0x4e, 0xe1, 0x5a, 0x04, 0xd3, 0xb0, 0xef, + 0x68, 0x63, 0x30, 0x6d, 0x00, 0xab, 0xfc, 0x6b, 0x3b, 0xbc, 0xd5, 0x6d, 0x6a, 0x26, 0x75, 0x75, + 0x9b, 0xda, 0x54, 0x97, 0xe8, 0x66, 0xf7, 0xb5, 0x5c, 0xc9, 0x85, 0xfc, 0x17, 0xb1, 0x56, 0xfe, + 0x1f, 0x0e, 0xe3, 0x62, 0xb3, 0xe5, 0x78, 0x24, 0x38, 0xd5, 0xfd, 0xb6, 0x2d, 0x27, 0xd3, 0x5d, + 0xc2, 0xb1, 0xde, 0x1b, 0x9b, 0xa5, 0xa2, 0xdf, 0x84, 0x0a, 0xba, 0x1e, 0x77, 0x5c, 0x32, 0x06, + 0xb8, 0xf3, 0x23, 0x00, 0x33, 0x5b, 0xc4, 0xc5, 0xa3, 0xb8, 0xda, 0x87, 0x19, 0xb0, 0xd4, 0x90, + 0x3b, 0x6d, 0x70, 0x1a, 0x60, 0x9b, 0x3c, 0x27, 0x01, 0x73, 0xa8, 0x07, 0x37, 0x41, 0x01, 0xfb, + 0x4e, 0x94, 0xda, 0xdf, 0x2d, 0x2b, 0xab, 0xca, 0xfa, 0x82, 0xf1, 0xeb, 0x59, 0xbf, 0x9a, 0x09, + 0xfb, 0xd5, 0xc2, 0xf6, 0xb3, 0xfd, 0x41, 0x0a, 0x5d, 0xaf, 0x83, 0xdb, 0xa0, 0x44, 0x3c, 0x93, + 0x5a, 0x8e, 0x67, 0xc7, 0x4c, 0xe5, 0x19, 0x09, 0x5d, 
0x89, 0xa1, 0xa5, 0xbd, 0x74, 0x1a, 0x8d, + 0xd6, 0xc3, 0x1d, 0xb0, 0x68, 0x11, 0x93, 0x5a, 0xb8, 0xd9, 0x19, 0x4c, 0xc3, 0xca, 0xd9, 0xd5, + 0xec, 0xfa, 0x82, 0xb1, 0x1c, 0xf6, 0xab, 0x8b, 0xbb, 0xa3, 0x49, 0x34, 0x5e, 0x0f, 0xef, 0x81, + 0xa2, 0x3c, 0x40, 0x2b, 0x61, 0xc8, 0x49, 0x06, 0x18, 0xf6, 0xab, 0xc5, 0x46, 0x2a, 0x83, 0x46, + 0x2a, 0x6b, 0x9f, 0x66, 0x40, 0x71, 0x44, 0x8d, 0x57, 0x60, 0x5e, 0x1c, 0x95, 0x85, 0x39, 0x96, + 0x52, 0x14, 0xea, 0xff, 0x69, 0x43, 0xbb, 0x24, 0x8a, 0x6b, 0x7e, 0xdb, 0x96, 0xde, 0xd1, 0x44, + 0xb5, 0xd6, 0xdb, 0xd0, 0x8e, 0x9a, 0x6f, 0x88, 0xc9, 0x0f, 0x09, 0xc7, 0x06, 0x8c, 0x15, 0x00, + 0xc3, 0x18, 0x4a, 0x58, 0xe1, 0x0b, 0x90, 0x63, 0x3e, 0x31, 0xa5, 0x5a, 0x85, 0xfa, 0x96, 0x36, + 0x95, 0x19, 0xb5, 0xf4, 0x98, 0x0d, 0x9f, 0x98, 0xc6, 0x4f, 0x71, 0x9b, 0x9c, 0x58, 0x21, 0x49, + 0x0a, 0x4d, 0x90, 0x67, 0x1c, 0xf3, 0xae, 0xd0, 0x51, 0xd0, 0xdf, 0xbf, 0x1d, 0xbd, 0xa4, 0x30, + 0x8a, 0x71, 0x83, 0x7c, 0xb4, 0x46, 0x31, 0x75, 0xed, 0x63, 0x16, 0xac, 0xa4, 0x01, 0x3b, 0xd4, + 0xb3, 0x1c, 0x2e, 0xf4, 0x7b, 0x08, 0x72, 0xfc, 0xd4, 0x27, 0xb1, 0x8d, 0xfe, 0x19, 0x8c, 0x78, + 0x7c, 0xea, 0x93, 0xab, 0x7e, 0xf5, 0xf7, 0x1b, 0x60, 0x22, 0x8d, 0x24, 0x10, 0x6e, 0x25, 0x3b, + 0x88, 0xec, 0xf4, 0x47, 0x7a, 0x88, 0xab, 0x7e, 0xb5, 0x94, 0xc0, 0xd2, 0x73, 0xc1, 0xc7, 0x00, + 0xd2, 0x66, 0x74, 0xc4, 0x8f, 0x22, 0xf7, 0x0b, 0x57, 0x0a, 0x21, 0xb2, 0x46, 0x25, 0xa6, 0x81, + 0x47, 0x63, 0x15, 0x68, 0x02, 0x0a, 0xf6, 0x00, 0xec, 0x60, 0xc6, 0x8f, 0x03, 0xec, 0xb1, 0x68, + 0x44, 0xc7, 0x25, 0xe5, 0x9c, 0x14, 0xf5, 0xef, 0xe9, 0x1c, 0x21, 0x10, 0xc3, 0xbe, 0x07, 0x63, + 0x6c, 0x68, 0x42, 0x07, 0xf8, 0x27, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x67, 0xe5, 0xf6, 0x93, + 0x33, 0x40, 0x32, 0x8a, 0xe2, 0x2c, 0xfc, 0x0b, 0xcc, 0xb9, 0x84, 0x31, 0x6c, 0x93, 0x72, 0x5e, + 0x16, 0x96, 0xe2, 0xc2, 0xb9, 0xc3, 0x28, 0x8c, 0x06, 0xf9, 0xda, 0x67, 0x05, 0xc0, 0xb4, 0xee, + 0x07, 0x0e, 0xe3, 0xf0, 0xe5, 0x98, 0xd3, 0xb5, 0xe9, 0xf6, 0x25, 0xd0, 0xd2, 0xe7, 0xbf, 0xc4, + 0x2d, 0xe7, 0x07, 0x91, 0x6b, 0x2e, 0x3f, 0x01, 0xb3, 0x0e, 0x27, 0xae, 0x38, 0xc5, 0xec, 0x7a, + 0xa1, 0xbe, 0x79, 0x2b, 0x1f, 0x1a, 0x3f, 0xc7, 0x1d, 0x66, 0xf7, 0x05, 0x17, 0x8a, 0x28, 0x6b, + 0x4b, 0xa3, 0xfb, 0x11, 0x17, 0xa0, 0xf6, 0x45, 0x3c, 0x70, 0x13, 0x6c, 0x0c, 0xdf, 0x82, 0x12, + 0x4b, 0xc5, 0x59, 0x59, 0x91, 0x43, 0x4d, 0x7d, 0x39, 0x26, 0x3c, 0x9b, 0xc3, 0x67, 0x2e, 0x1d, + 0x67, 0x68, 0xb4, 0x19, 0x3c, 0x02, 0xcb, 0x26, 0x75, 0x5d, 0xea, 0xed, 0x4d, 0x7c, 0x2f, 0x7f, + 0x0b, 0xfb, 0xd5, 0xe5, 0x9d, 0x49, 0x05, 0x68, 0x32, 0x0e, 0x06, 0x00, 0x98, 0x83, 0x2b, 0x10, + 0x3d, 0x98, 0x85, 0xfa, 0x83, 0x5b, 0x09, 0x9c, 0xdc, 0xa4, 0xe1, 0x9b, 0x95, 0x84, 0x18, 0xba, + 0xd6, 0xc5, 0x78, 0x72, 0x76, 0xa9, 0x66, 0xce, 0x2f, 0xd5, 0xcc, 0xc5, 0xa5, 0x9a, 0x79, 0x17, + 0xaa, 0xca, 0x59, 0xa8, 0x2a, 0xe7, 0xa1, 0xaa, 0x5c, 0x84, 0xaa, 0xf2, 0x35, 0x54, 0x95, 0xf7, + 0xdf, 0xd4, 0xcc, 0xc9, 0xda, 0x54, 0x1f, 0xe4, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x3a, + 0x2e, 0x07, 0xd1, 0x07, 0x00, 0x00, } func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { @@ -296,6 +298,15 @@ func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ServedVersions) > 0 { + for iNdEx := len(m.ServedVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServedVersions[iNdEx]) + copy(dAtA[i:], m.ServedVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServedVersions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if len(m.DecodableVersions) > 0 { for iNdEx := 
len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DecodableVersions[iNdEx]) @@ -582,6 +593,12 @@ func (m *ServerStorageVersion) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.ServedVersions) > 0 { + for _, s := range m.ServedVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -685,6 +702,7 @@ func (this *ServerStorageVersion) String() string { `APIServerID:` + fmt.Sprintf("%v", this.APIServerID) + `,`, `EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`, `DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`, + `ServedVersions:` + fmt.Sprintf("%v", this.ServedVersions) + `,`, `}`, }, "") return s @@ -896,6 +914,38 @@ func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error { } m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedVersions = append(m.ServedVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index 37ac0d326..6e6bab521 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -42,9 +42,14 @@ message ServerStorageVersion { // The encodingVersion must be included in the decodableVersions. // +listType=set repeated string decodableVersions = 3; + + // The API server can serve these versions. + // DecodableVersions must include all ServedVersions. + // +listType=set + repeated string servedVersions = 4; } -// Storage version of a specific resource. +// Storage version of a specific resource. message StorageVersion { // The name is .. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go index bfa249e13..0ffcf95f0 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -24,7 +24,7 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Storage version of a specific resource. +// Storage version of a specific resource. type StorageVersion struct { metav1.TypeMeta `json:",inline"` // The name is .. @@ -77,6 +77,11 @@ type ServerStorageVersion struct { // The encodingVersion must be included in the decodableVersions. // +listType=set DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"` + + // The API server can serve these versions. + // DecodableVersions must include all ServedVersions. 
+ // +listType=set + ServedVersions []string `json:"servedVersions,omitempty" protobuf:"bytes,4,opt,name=servedVersions"` } type StorageVersionConditionType string diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 297ed08a7..6fd1c3ebe 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ServerStorageVersion = map[string]string{ @@ -32,6 +32,7 @@ var map_ServerStorageVersion = map[string]string{ "apiServerID": "The ID of the reporting API server.", "encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", "decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", + "servedVersions": "The API server can serve these versions. DecodableVersions must include all ServedVersions.", } func (ServerStorageVersion) SwaggerDoc() map[string]string { @@ -39,7 +40,7 @@ func (ServerStorageVersion) SwaggerDoc() map[string]string { } var map_StorageVersion = map[string]string{ - "": "\n Storage version of a specific resource.", + "": "Storage version of a specific resource.", "metadata": "The name is ..", "spec": "Spec is an empty spec. 
It is here to comply with Kubernetes API style.", "status": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go index 44dffa751..638d80140 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -33,6 +33,11 @@ func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ServedVersions != nil { + in, out := &in.ServedVersions, &out.ServedVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index ee10998d5..84a7af599 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -748,12 +748,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{25} + return fileDescriptor_e1014cab6f31e43b, []int{26} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,7 +809,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} + return fileDescriptor_e1014cab6f31e43b, []int{27} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -809,7 +837,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{28} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -837,7 +865,7 @@ var xxx_messageInfo_StatefulSetStatus 
proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{28} + return fileDescriptor_e1014cab6f31e43b, []int{29} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -888,6 +916,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") @@ -899,142 +928,146 @@ func init() { } var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, - 0x15, 0xd7, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0x54, 0x69, 0x63, 0x37, 0xbb, 0xee, 0xd6, - 0x75, 0x94, 0x38, 0xde, 0xad, 0x1d, 0x27, 0x08, 0xe2, 0x22, 0x81, 0x66, 0x95, 0xa6, 0x69, 0x24, - 0x59, 0xa5, 0x2c, 0x07, 0x70, 0xd3, 0xa2, 0xd4, 0x2c, 0xbd, 0x9a, 0x68, 0xbe, 0x30, 0xc3, 0x51, - 0x2c, 0xf4, 0x52, 0x14, 0xe8, 0xad, 0x87, 0xfe, 0x27, 0x45, 0x51, 0x34, 0xb7, 0x20, 0x08, 0x7a, - 0xf1, 0xa5, 0x68, 0xd0, 0x4b, 0x73, 0x5a, 0xd4, 0x9b, 0x53, 0x51, 0xf4, 0xd6, 0x5e, 0x7c, 0x69, - 0x41, 0x0e, 0xe7, 0x9b, 0xa3, 0x5d, 0xc9, 0xb1, 0xf2, 0x01, 0xdf, 0xb4, 0xe4, 0xef, 0xfd, 0xf8, - 0x48, 0xbe, 0xc7, 0xf7, 0x23, 0x47, 0xe0, 0xe6, 0xfe, 0xab, 0x5e, 0x5b, 0xb7, 0x3b, 0xfb, 0xfe, - 0x2e, 0x71, 0x2d, 0x42, 0x89, 0xd7, 0x39, 0x20, 0x56, 0xcf, 0x76, 0x3b, 0xa2, 0x03, 0x3b, 0x7a, - 0x07, 0x3b, 0x8e, 0xd7, 0x39, 0xb8, 0xd6, 0xe9, 0x13, 0x8b, 0xb8, 0x98, 0x92, 0x5e, 0xdb, 0x71, - 0x6d, 0x6a, 0x43, 0x18, 0x60, 0xda, 0xd8, 0xd1, 0xdb, 0x0c, 0xd3, 0x3e, 0xb8, 0x76, 0xfe, 0x6a, - 0x5f, 0xa7, 0x7b, 0xfe, 0x6e, 0x5b, 0xb3, 0xcd, 0x4e, 0xdf, 0xee, 0xdb, 0x1d, 0x0e, 0xdd, 0xf5, - 0xef, 0xf1, 0x5f, 0xfc, 0x07, 0xff, 0x2b, 0xa0, 0x38, 0xdf, 0x4a, 0x0c, 0xa3, 0xd9, 0x2e, 0x91, - 0x0c, 0x73, 0xfe, 0x46, 0x8c, 0x31, 0xb1, 0xb6, 0xa7, 0x5b, 0xc4, 0x3d, 0xec, 0x38, 0xfb, 0x7d, - 0xd6, 0xe0, 0x75, 0x4c, 0x42, 0xb1, 0xcc, 0xaa, 0x53, 0x64, 0xe5, 0xfa, 0x16, 0xd5, 0x4d, 0x92, - 0x33, 0x78, 0x65, 0x94, 0x81, 0xa7, 0xed, 0x11, 0x13, 0xe7, 0xec, 0x5e, 0x2a, 0xb2, 0xf3, 0xa9, - 0x6e, 0x74, 0x74, 0x8b, 0x7a, 0xd4, 0xcd, 0x1a, 0xb5, 0xfe, 0xab, 0x00, 0xd8, 0xb5, 0x2d, 0xea, - 0xda, 0x86, 0x41, 0x5c, 0x44, 0x0e, 0x74, 0x4f, 0xb7, 0x2d, 0xf8, 0x0b, 0x50, 0x63, 0xf3, 0xe9, - 0x61, 0x8a, 0xeb, 0xca, 0x45, 0x65, 0x65, 0xe6, 0xfa, 0xf7, 0xdb, 0xf1, 0x22, 0x47, 0xf4, 0x6d, - 0x67, 0xbf, 0xcf, 0x1a, 0xbc, 0x36, 0x43, 0xb7, 0x0f, 0xae, 0xb5, 0x6f, 0xed, 0xbe, 0x4f, 0x34, - 0xba, 0x41, 0x28, 0x56, 0xe1, 0x83, 0x41, 0x73, 0x62, 0x38, 0x68, 0x82, 0xb8, 0x0d, 0x45, 0xac, - 0xf0, 0x16, 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x5a, 0xc8, 0x2e, 0x26, 0xdd, 0x46, 0xf8, 0x83, - 0x37, 0xef, 0x53, 0x62, 0x31, 0xf7, 0xd4, 0x33, 0x82, 0xba, 0xb2, 0x86, 0x29, 0x46, 0x9c, 0x08, - 0xbe, 0x08, 0x6a, 0xae, 0x70, 0xbf, 0x5e, 0xbe, 0xa8, 
0xac, 0x94, 0xd5, 0xb3, 0x02, 0x55, 0x0b, - 0xa7, 0x85, 0x22, 0x44, 0xeb, 0xcf, 0x0a, 0x58, 0xca, 0xcf, 0x7b, 0x5d, 0xf7, 0x28, 0x7c, 0x2f, - 0x37, 0xf7, 0xf6, 0x78, 0x73, 0x67, 0xd6, 0x7c, 0xe6, 0xd1, 0xc0, 0x61, 0x4b, 0x62, 0xde, 0xef, - 0x80, 0xaa, 0x4e, 0x89, 0xe9, 0xd5, 0x4b, 0x17, 0xcb, 0x2b, 0x33, 0xd7, 0x2f, 0xb7, 0xf3, 0xb1, - 0xdb, 0xce, 0x3b, 0xa6, 0xce, 0x0a, 0xca, 0xea, 0xdb, 0xcc, 0x18, 0x05, 0x1c, 0xad, 0xff, 0x29, - 0x60, 0x7a, 0x0d, 0x13, 0xd3, 0xb6, 0xb6, 0x09, 0x3d, 0x85, 0x4d, 0xeb, 0x82, 0x8a, 0xe7, 0x10, - 0x4d, 0x6c, 0xda, 0x77, 0x64, 0xbe, 0x47, 0xee, 0x6c, 0x3b, 0x44, 0x8b, 0x37, 0x8a, 0xfd, 0x42, - 0xdc, 0x18, 0xbe, 0x03, 0x26, 0x3d, 0x8a, 0xa9, 0xef, 0xf1, 0x6d, 0x9a, 0xb9, 0xfe, 0xdd, 0xa3, - 0x69, 0x38, 0x54, 0x9d, 0x13, 0x44, 0x93, 0xc1, 0x6f, 0x24, 0x28, 0x5a, 0xff, 0x2c, 0x01, 0x18, - 0x61, 0xbb, 0xb6, 0xd5, 0xd3, 0x29, 0x8b, 0xdf, 0xd7, 0x40, 0x85, 0x1e, 0x3a, 0x84, 0x2f, 0xc3, - 0xb4, 0x7a, 0x39, 0xf4, 0xe2, 0xf6, 0xa1, 0x43, 0x1e, 0x0d, 0x9a, 0x4b, 0x79, 0x0b, 0xd6, 0x83, - 0xb8, 0x0d, 0x5c, 0x8f, 0xfc, 0x2b, 0x71, 0xeb, 0x1b, 0xe9, 0xa1, 0x1f, 0x0d, 0x9a, 0x92, 0xc3, - 0xa2, 0x1d, 0x31, 0xa5, 0x1d, 0x84, 0x07, 0x00, 0x1a, 0xd8, 0xa3, 0xb7, 0x5d, 0x6c, 0x79, 0xc1, - 0x48, 0xba, 0x49, 0xc4, 0xcc, 0x5f, 0x18, 0x6f, 0x7b, 0x98, 0x85, 0x7a, 0x5e, 0x78, 0x01, 0xd7, - 0x73, 0x6c, 0x48, 0x32, 0x02, 0xbc, 0x0c, 0x26, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x2c, - 0xa2, 0x05, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x3c, 0x98, 0x32, 0x89, 0xe7, 0xe1, 0x3e, 0xa9, - 0x57, 0x39, 0x70, 0x5e, 0x00, 0xa7, 0x36, 0x82, 0x66, 0x14, 0xf6, 0xb7, 0xfe, 0xa0, 0x80, 0xd9, - 0x68, 0xe5, 0x4e, 0x21, 0x55, 0xd4, 0x74, 0xaa, 0x3c, 0x7b, 0x64, 0x9c, 0x14, 0x64, 0xc8, 0xc7, - 0xe5, 0x84, 0xcf, 0x2c, 0x08, 0xe1, 0xcf, 0x40, 0xcd, 0x23, 0x06, 0xd1, 0xa8, 0xed, 0x0a, 0x9f, - 0x5f, 0x1a, 0xd3, 0x67, 0xbc, 0x4b, 0x8c, 0x6d, 0x61, 0xaa, 0x9e, 0x61, 0x4e, 0x87, 0xbf, 0x50, - 0x44, 0x09, 0x7f, 0x02, 0x6a, 0x94, 0x98, 0x8e, 0x81, 0x29, 0x11, 0x69, 0x92, 0x8a, 0x6f, 0x16, - 0x2e, 0x8c, 0x6c, 0xcb, 0xee, 0xdd, 0x16, 0x30, 0x9e, 0x28, 0xd1, 0x3a, 0x84, 0xad, 0x28, 0xa2, - 0x81, 0xfb, 0x60, 0xce, 0x77, 0x7a, 0x0c, 0x49, 0xd9, 0xd1, 0xdd, 0x3f, 0x14, 0xe1, 0x73, 0xe5, - 0xc8, 0x05, 0xd9, 0x49, 0x99, 0xa8, 0x4b, 0x62, 0x80, 0xb9, 0x74, 0x3b, 0xca, 0x50, 0xc3, 0x55, - 0x30, 0x6f, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xb8, 0x4d, 0x34, 0xdb, 0xea, 0x79, 0x3c, 0x80, 0xaa, - 0xea, 0xb2, 0x20, 0x98, 0xdf, 0x48, 0x77, 0xa3, 0x2c, 0x1e, 0xae, 0x83, 0xc5, 0xf0, 0x9c, 0xfd, - 0x91, 0xee, 0x51, 0xdb, 0x3d, 0x5c, 0xd7, 0x4d, 0x9d, 0xd6, 0x27, 0x39, 0x4f, 0x7d, 0x38, 0x68, - 0x2e, 0x22, 0x49, 0x3f, 0x92, 0x5a, 0xb5, 0x7e, 0x3b, 0x09, 0xe6, 0x33, 0xa7, 0x01, 0xbc, 0x03, - 0x96, 0x34, 0xdf, 0x75, 0x89, 0x45, 0x37, 0x7d, 0x73, 0x97, 0xb8, 0xdb, 0xda, 0x1e, 0xe9, 0xf9, - 0x06, 0xe9, 0xf1, 0x1d, 0xad, 0xaa, 0x0d, 0xe1, 0xeb, 0x52, 0x57, 0x8a, 0x42, 0x05, 0xd6, 0xf0, - 0xc7, 0x00, 0x5a, 0xbc, 0x69, 0x43, 0xf7, 0xbc, 0x88, 0xb3, 0xc4, 0x39, 0xa3, 0x04, 0xdc, 0xcc, - 0x21, 0x90, 0xc4, 0x8a, 0xf9, 0xd8, 0x23, 0x9e, 0xee, 0x92, 0x5e, 0xd6, 0xc7, 0x72, 0xda, 0xc7, - 0x35, 0x29, 0x0a, 0x15, 0x58, 0xc3, 0x97, 0xc1, 0x4c, 0x30, 0x1a, 0x5f, 0x73, 0xb1, 0x39, 0x0b, - 0x82, 0x6c, 0x66, 0x33, 0xee, 0x42, 0x49, 0x1c, 0x9b, 0x9a, 0xbd, 0xeb, 0x11, 0xf7, 0x80, 0xf4, - 0xde, 0x0a, 0x34, 0x00, 0x2b, 0x94, 0x55, 0x5e, 0x28, 0xa3, 0xa9, 0xdd, 0xca, 0x21, 0x90, 0xc4, - 0x8a, 0x4d, 0x2d, 0x88, 0x9a, 0xdc, 0xd4, 0x26, 0xd3, 0x53, 0xdb, 0x91, 0xa2, 0x50, 0x81, 0x35, - 0x8b, 0xbd, 0xc0, 0xe5, 0xd5, 0x03, 0xac, 0x1b, 0x78, 0xd7, 0x20, 0xf5, 0xa9, 
0x74, 0xec, 0x6d, - 0xa6, 0xbb, 0x51, 0x16, 0x0f, 0xdf, 0x02, 0xe7, 0x82, 0xa6, 0x1d, 0x0b, 0x47, 0x24, 0x35, 0x4e, - 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x99, 0x05, 0xa0, 0xbc, 0x0d, 0x7c, 0x0d, 0xcc, 0x69, 0xb6, 0x61, - 0xf0, 0x78, 0xec, 0xda, 0xbe, 0x45, 0xeb, 0xd3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0xa6, 0x7a, 0x50, - 0x06, 0x09, 0xef, 0x02, 0xa0, 0x85, 0xe5, 0xc0, 0xab, 0x83, 0xe2, 0x42, 0x9f, 0xaf, 0x43, 0x71, - 0x01, 0x8e, 0x9a, 0x3c, 0x94, 0x60, 0x6b, 0x7d, 0xac, 0x80, 0xe5, 0x82, 0x1c, 0x87, 0x6f, 0xa4, - 0xaa, 0xde, 0x95, 0x4c, 0xd5, 0xbb, 0x50, 0x60, 0x96, 0x28, 0x7d, 0x1a, 0x98, 0x65, 0xba, 0x43, - 0xb7, 0xfa, 0x01, 0x44, 0x9c, 0x60, 0x2f, 0xc8, 0x7c, 0x47, 0x49, 0x60, 0x7c, 0x0c, 0x9f, 0x1b, - 0x0e, 0x9a, 0xb3, 0xa9, 0x3e, 0x94, 0xe6, 0x6c, 0xfd, 0xba, 0x04, 0xc0, 0x1a, 0x71, 0x0c, 0xfb, - 0xd0, 0x24, 0xd6, 0x69, 0xa8, 0x96, 0xb5, 0x94, 0x6a, 0x69, 0x49, 0x37, 0x22, 0xf2, 0xa7, 0x50, - 0xb6, 0xac, 0x67, 0x64, 0xcb, 0xa5, 0x11, 0x3c, 0x47, 0xeb, 0x96, 0xbf, 0x97, 0xc1, 0x42, 0x0c, - 0x8e, 0x85, 0xcb, 0xcd, 0xd4, 0x16, 0x3e, 0x97, 0xd9, 0xc2, 0x65, 0x89, 0xc9, 0x13, 0x53, 0x2e, - 0xef, 0x83, 0x39, 0xa6, 0x2b, 0x82, 0x5d, 0xe3, 0xaa, 0x65, 0xf2, 0xd8, 0xaa, 0x25, 0xaa, 0x3a, - 0xeb, 0x29, 0x26, 0x94, 0x61, 0x2e, 0x50, 0x49, 0x53, 0x5f, 0x47, 0x95, 0xf4, 0x47, 0x05, 0xcc, - 0xc5, 0xdb, 0x74, 0x0a, 0x32, 0xa9, 0x9b, 0x96, 0x49, 0x8d, 0xa3, 0xe3, 0xb2, 0x40, 0x27, 0xfd, - 0xad, 0x92, 0xf4, 0x9a, 0x0b, 0xa5, 0x15, 0x76, 0xa1, 0x72, 0x0c, 0x5d, 0xc3, 0x9e, 0x28, 0xab, - 0x67, 0x82, 0xcb, 0x54, 0xd0, 0x86, 0xa2, 0xde, 0x94, 0xa4, 0x2a, 0x3d, 0x59, 0x49, 0x55, 0xfe, - 0x62, 0x24, 0xd5, 0x6d, 0x50, 0xf3, 0x42, 0x31, 0x55, 0xe1, 0x94, 0x97, 0x47, 0xa5, 0xb3, 0xd0, - 0x51, 0x11, 0x6b, 0xa4, 0xa0, 0x22, 0x26, 0x99, 0x76, 0xaa, 0x7e, 0x99, 0xda, 0x89, 0x85, 0xb7, - 0x83, 0x7d, 0x8f, 0xf4, 0x78, 0x2a, 0xd5, 0xe2, 0xf0, 0xde, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x1d, - 0xb0, 0xec, 0xb8, 0x76, 0xdf, 0x25, 0x9e, 0xb7, 0x46, 0x70, 0xcf, 0xd0, 0x2d, 0x12, 0x4e, 0x20, - 0xa8, 0x7a, 0x17, 0x86, 0x83, 0xe6, 0xf2, 0x96, 0x1c, 0x82, 0x8a, 0x6c, 0x5b, 0x1f, 0x55, 0xc0, - 0xd9, 0xec, 0x89, 0x58, 0x20, 0x44, 0x94, 0x13, 0x09, 0x91, 0x17, 0x13, 0x21, 0x1a, 0xa8, 0xb4, - 0xc4, 0x9d, 0x3f, 0x17, 0xa6, 0xab, 0x60, 0x5e, 0x08, 0x8f, 0xb0, 0x53, 0x48, 0xb1, 0x68, 0x7b, - 0x76, 0xd2, 0xdd, 0x28, 0x8b, 0x87, 0x37, 0xc1, 0xac, 0xcb, 0xb5, 0x55, 0x48, 0x10, 0xe8, 0x93, - 0x6f, 0x09, 0x82, 0x59, 0x94, 0xec, 0x44, 0x69, 0x2c, 0xd3, 0x26, 0xb1, 0xe4, 0x08, 0x09, 0x2a, - 0x69, 0x6d, 0xb2, 0x9a, 0x05, 0xa0, 0xbc, 0x0d, 0xdc, 0x00, 0x0b, 0xbe, 0x95, 0xa7, 0x0a, 0x62, - 0xed, 0x82, 0xa0, 0x5a, 0xd8, 0xc9, 0x43, 0x90, 0xcc, 0x0e, 0xfe, 0x34, 0x25, 0x57, 0x26, 0xf9, - 0x29, 0xf2, 0xdc, 0xd1, 0xe9, 0x30, 0xb6, 0x5e, 0x91, 0xe8, 0xa8, 0xda, 0xb8, 0x3a, 0xaa, 0xf5, - 0xa1, 0x02, 0x60, 0x3e, 0x05, 0x47, 0x5e, 0xee, 0x73, 0x16, 0x89, 0x12, 0xd9, 0x93, 0x2b, 0x9c, - 0x2b, 0xa3, 0x15, 0x4e, 0x7c, 0x82, 0x8e, 0x27, 0x71, 0xc4, 0xf2, 0x9e, 0xce, 0xc3, 0xcc, 0x18, - 0x12, 0x27, 0xf6, 0xe7, 0xf1, 0x24, 0x4e, 0x82, 0xe7, 0x68, 0x89, 0xf3, 0xaf, 0x12, 0x58, 0x88, - 0xc1, 0x63, 0x4b, 0x1c, 0x89, 0xc9, 0xd3, 0xc7, 0x99, 0xf1, 0x64, 0x47, 0xbc, 0x74, 0x5f, 0x11, - 0xd9, 0x11, 0x3b, 0x54, 0x20, 0x3b, 0x7e, 0x5f, 0x4a, 0x7a, 0x7d, 0x4c, 0xd9, 0xf1, 0x05, 0x3c, - 0x55, 0x7c, 0xed, 0x94, 0x4b, 0xeb, 0x93, 0x32, 0x38, 0x9b, 0x4d, 0xc1, 0x54, 0x1d, 0x54, 0x46, - 0xd6, 0xc1, 0x2d, 0xb0, 0x78, 0xcf, 0x37, 0x8c, 0x43, 0x3e, 0x87, 0x44, 0x31, 0x0c, 0x2a, 0xe8, - 0xb7, 0x85, 0xe5, 0xe2, 0x0f, 0x25, 0x18, 0x24, 0xb5, 0xcc, 0x97, 0xc5, 0xca, 0xe3, 0x96, 0xc5, - 0xea, 
0x09, 0xca, 0xa2, 0x5c, 0x59, 0x94, 0x4f, 0xa4, 0x2c, 0xc6, 0xae, 0x89, 0x92, 0xe3, 0x6a, - 0xe4, 0x1d, 0x7e, 0xa8, 0x80, 0x25, 0xf9, 0xf5, 0x19, 0x1a, 0x60, 0xce, 0xc4, 0xf7, 0x93, 0x8f, - 0x17, 0xa3, 0x0a, 0x86, 0x4f, 0x75, 0xa3, 0x1d, 0x7c, 0xdd, 0x69, 0xbf, 0x6d, 0xd1, 0x5b, 0xee, - 0x36, 0x75, 0x75, 0xab, 0x1f, 0x14, 0xd8, 0x8d, 0x14, 0x17, 0xca, 0x70, 0xc3, 0xbb, 0xa0, 0x66, - 0xe2, 0xfb, 0xdb, 0xbe, 0xdb, 0x0f, 0x0b, 0xe1, 0xf1, 0xc7, 0xe1, 0xb1, 0xbf, 0x21, 0x58, 0x50, - 0xc4, 0xd7, 0xfa, 0x5c, 0x01, 0xcb, 0x05, 0x15, 0xf4, 0x1b, 0x34, 0xcb, 0x8f, 0x14, 0x70, 0x31, - 0x35, 0x4b, 0x96, 0x91, 0xe4, 0x9e, 0x6f, 0xf0, 0xe4, 0x14, 0x82, 0xe5, 0x0a, 0x98, 0x76, 0xb0, - 0x4b, 0xf5, 0x48, 0xe9, 0x56, 0xd5, 0xd9, 0xe1, 0xa0, 0x39, 0xbd, 0x15, 0x36, 0xa2, 0xb8, 0x5f, - 0xb2, 0x36, 0xa5, 0x27, 0xb7, 0x36, 0xad, 0xdf, 0x94, 0xc0, 0x4c, 0xc2, 0xe5, 0x53, 0x90, 0x2a, - 0x6f, 0xa6, 0xa4, 0x8a, 0xf4, 0xe3, 0x4f, 0x72, 0x0d, 0x8b, 0xb4, 0xca, 0x46, 0x46, 0xab, 0x7c, - 0x6f, 0x14, 0xd1, 0xd1, 0x62, 0xe5, 0xdf, 0x25, 0xb0, 0x98, 0x40, 0xc7, 0x6a, 0xe5, 0x07, 0x29, - 0xb5, 0xb2, 0x92, 0x51, 0x2b, 0x75, 0x99, 0xcd, 0x53, 0xb9, 0x32, 0x5a, 0xae, 0xfc, 0x49, 0x01, - 0xf3, 0x89, 0xb5, 0x3b, 0x05, 0xbd, 0xb2, 0x96, 0xd6, 0x2b, 0xcd, 0x11, 0xf1, 0x52, 0x20, 0x58, - 0xfe, 0xa3, 0x80, 0x4e, 0x02, 0xb5, 0x45, 0x5c, 0x4f, 0xf7, 0x28, 0xb1, 0xe8, 0x1d, 0xdb, 0xf0, - 0x4d, 0xd2, 0x35, 0xb0, 0x6e, 0x22, 0xc2, 0x1a, 0x74, 0xdb, 0xda, 0xb2, 0x0d, 0x5d, 0x3b, 0x84, - 0x18, 0xcc, 0x7c, 0xb0, 0x47, 0xac, 0x35, 0x62, 0x10, 0x2a, 0x3e, 0x51, 0x4c, 0xab, 0x6f, 0x84, - 0x2f, 0xf6, 0xef, 0xc6, 0x5d, 0x8f, 0x06, 0xcd, 0x95, 0x71, 0x18, 0x79, 0x80, 0x25, 0x39, 0xe1, - 0xcf, 0x01, 0x60, 0x3f, 0xb7, 0x35, 0x1c, 0x7e, 0xb0, 0x98, 0x56, 0x5f, 0x0f, 0xd3, 0xf0, 0xdd, - 0xa8, 0xe7, 0x58, 0x03, 0x24, 0x18, 0x5b, 0x7f, 0x9d, 0x4a, 0x6d, 0xd7, 0x37, 0xfe, 0x7d, 0xe8, - 0x97, 0x60, 0xf1, 0x20, 0x5e, 0x9d, 0x10, 0xc0, 0x74, 0x0d, 0x8b, 0x9d, 0xe7, 0xa5, 0xf4, 0xb2, - 0x75, 0x8d, 0xd5, 0xd4, 0x1d, 0x09, 0x1d, 0x92, 0x0e, 0x02, 0x5f, 0x06, 0x33, 0x4c, 0x8f, 0xe8, - 0x1a, 0xd9, 0xc4, 0x66, 0x98, 0x4a, 0xd1, 0x17, 0x9e, 0xed, 0xb8, 0x0b, 0x25, 0x71, 0x70, 0x0f, - 0x2c, 0x38, 0x76, 0x6f, 0x03, 0x5b, 0xb8, 0x4f, 0x58, 0x95, 0x0d, 0xb6, 0x92, 0xbf, 0x1c, 0x4d, - 0xab, 0xaf, 0x84, 0xaf, 0x02, 0x5b, 0x79, 0x08, 0xbb, 0x75, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, - 0xa1, 0x99, 0xfb, 0x20, 0x39, 0x95, 0xfb, 0x2f, 0x0e, 0x59, 0x4e, 0x9d, 0xf0, 0x93, 0x64, 0xd1, - 0x9b, 0x58, 0xed, 0x44, 0x6f, 0x62, 0x92, 0x5b, 0xc3, 0xf4, 0x31, 0x6f, 0x0d, 0x9f, 0x28, 0xe0, - 0x92, 0x33, 0x46, 0x1a, 0xd5, 0x01, 0x5f, 0x96, 0xee, 0x88, 0x65, 0x19, 0x27, 0x23, 0xd5, 0x95, - 0xe1, 0xa0, 0x79, 0x69, 0x1c, 0x24, 0x1a, 0xcb, 0xb5, 0xd6, 0x87, 0x55, 0x70, 0x2e, 0x57, 0x1e, - 0xbf, 0xc4, 0xc7, 0xb9, 0xdc, 0x15, 0xa2, 0x7c, 0x8c, 0x2b, 0xc4, 0x2a, 0x98, 0x17, 0x5f, 0x74, - 0x33, 0x37, 0x90, 0x68, 0x4f, 0xbb, 0xe9, 0x6e, 0x94, 0xc5, 0xcb, 0x1e, 0x07, 0xab, 0xc7, 0x7c, - 0x1c, 0x4c, 0x7a, 0x21, 0xfe, 0x11, 0x29, 0x48, 0xbe, 0xbc, 0x17, 0xe2, 0xff, 0x91, 0xb2, 0x78, - 0xf8, 0x7a, 0x98, 0x59, 0x11, 0xc3, 0x14, 0x67, 0xc8, 0xa4, 0x4a, 0x44, 0x90, 0x41, 0x3f, 0xd6, - 0x57, 0xcb, 0xf7, 0x24, 0x5f, 0x2d, 0x57, 0x46, 0x84, 0xee, 0xf8, 0xef, 0x80, 0xd2, 0x5b, 0xde, - 0xcc, 0xf1, 0x6f, 0x79, 0xad, 0xbf, 0x28, 0xe0, 0x99, 0xc2, 0x33, 0x05, 0xae, 0xa6, 0xe4, 0xda, - 0xd5, 0x8c, 0x5c, 0x7b, 0xb6, 0xd0, 0x30, 0xa1, 0xd9, 0x4c, 0xf9, 0x13, 0xe1, 0x8d, 0x91, 0x4f, - 0x84, 0x12, 0xe9, 0x3f, 0xfa, 0xad, 0x50, 0x7d, 0xf5, 0xc1, 0xc3, 0xc6, 0xc4, 0xa7, 0x0f, 0x1b, - 0x13, 0x9f, 0x3d, 0x6c, 0x4c, 
0xfc, 0x6a, 0xd8, 0x50, 0x1e, 0x0c, 0x1b, 0xca, 0xa7, 0xc3, 0x86, - 0xf2, 0xd9, 0xb0, 0xa1, 0xfc, 0x63, 0xd8, 0x50, 0x7e, 0xf7, 0x79, 0x63, 0xe2, 0x2e, 0xcc, 0xff, - 0x1b, 0xe4, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x92, 0xcf, 0x5d, 0xcc, 0x34, 0x29, 0x00, 0x00, + // 2211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8, + 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0x60, 0x17, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab, + 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x88, 0x1c, 0x53, 0x1b, 0xed, 0x17, 0x76, 0x87, 0x8a, 0x89, + 0x5e, 0x8a, 0x02, 0xbd, 0xf5, 0xd0, 0xbf, 0xa1, 0xff, 0x40, 0x51, 0x14, 0xcd, 0x2d, 0x08, 0x82, + 0x5e, 0x7c, 0x29, 0x10, 0xf4, 0xd2, 0x9c, 0x88, 0x9a, 0x39, 0x15, 0x45, 0x6f, 0xed, 0xc5, 0x97, + 0x16, 0x33, 0x3b, 0xfb, 0x3d, 0x2b, 0x52, 0x72, 0xac, 0x34, 0x81, 0x6f, 0xdc, 0x99, 0xdf, 0xfb, + 0xed, 0x9b, 0x99, 0xf7, 0xe6, 0xfd, 0x66, 0x96, 0xe0, 0xf6, 0xc1, 0xeb, 0x6e, 0x5d, 0xb3, 0x1a, + 0x07, 0xfd, 0x3d, 0xe2, 0x98, 0x84, 0x12, 0xb7, 0x71, 0x48, 0xcc, 0xae, 0xe5, 0x34, 0x44, 0x07, + 0xb6, 0xb5, 0x06, 0xb6, 0x6d, 0xb7, 0x71, 0x78, 0xbd, 0xd1, 0x23, 0x26, 0x71, 0x30, 0x25, 0xdd, + 0xba, 0xed, 0x58, 0xd4, 0x82, 0xd0, 0xc3, 0xd4, 0xb1, 0xad, 0xd5, 0x19, 0xa6, 0x7e, 0x78, 0xfd, + 0xfc, 0xb5, 0x9e, 0x46, 0xf7, 0xfb, 0x7b, 0xf5, 0x8e, 0x65, 0x34, 0x7a, 0x56, 0xcf, 0x6a, 0x70, + 0xe8, 0x5e, 0xff, 0x3e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x47, 0x71, 0x5e, 0x8d, 0xbc, 0xa6, 0x63, + 0x39, 0x44, 0xf2, 0x9a, 0xf3, 0x37, 0x43, 0x8c, 0x81, 0x3b, 0xfb, 0x9a, 0x49, 0x9c, 0x41, 0xc3, + 0x3e, 0xe8, 0xb1, 0x06, 0xb7, 0x61, 0x10, 0x8a, 0x65, 0x56, 0x8d, 0x2c, 0x2b, 0xa7, 0x6f, 0x52, + 0xcd, 0x20, 0x29, 0x83, 0xd7, 0xc6, 0x19, 0xb8, 0x9d, 0x7d, 0x62, 0xe0, 0x94, 0xdd, 0x2b, 0x59, + 0x76, 0x7d, 0xaa, 0xe9, 0x0d, 0xcd, 0xa4, 0x2e, 0x75, 0x92, 0x46, 0xea, 0x7f, 0x14, 0x00, 0x5b, + 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0xc8, 0xa1, 0xe6, 0x6a, 0x96, 0x09, 0x7f, 0x0e, 0x4a, + 0x6c, 0x3c, 0x5d, 0x4c, 0x71, 0x45, 0xb9, 0xa8, 0xac, 0x96, 0x6f, 0x7c, 0xaf, 0x1e, 0x4e, 0x72, + 0x40, 0x5f, 0xb7, 0x0f, 0x7a, 0xac, 0xc1, 0xad, 0x33, 0x74, 0xfd, 0xf0, 0x7a, 0x7d, 0x7b, 0xef, + 0x03, 0xd2, 0xa1, 0x9b, 0x84, 0xe2, 0x26, 0x7c, 0x38, 0xac, 0x4d, 0x8d, 0x86, 0x35, 0x10, 0xb6, + 0xa1, 0x80, 0x15, 0x6e, 0x83, 0x02, 0x67, 0xcf, 0x71, 0xf6, 0x6b, 0x99, 0xec, 0x62, 0xd0, 0x75, + 0x84, 0x3f, 0x7c, 0xeb, 0x01, 0x25, 0x26, 0x73, 0xaf, 0x79, 0x46, 0x50, 0x17, 0xd6, 0x31, 0xc5, + 0x88, 0x13, 0xc1, 0x97, 0x41, 0xc9, 0x11, 0xee, 0x57, 0xf2, 0x17, 0x95, 0xd5, 0x7c, 0xf3, 0xac, + 0x40, 0x95, 0xfc, 0x61, 0xa1, 0x00, 0xa1, 0xfe, 0x59, 0x01, 0xcb, 0xe9, 0x71, 0x6f, 0x68, 0x2e, + 0x85, 0xef, 0xa7, 0xc6, 0x5e, 0x9f, 0x6c, 0xec, 0xcc, 0x9a, 0x8f, 0x3c, 0x78, 0xb1, 0xdf, 0x12, + 0x19, 0xf7, 0xbb, 0xa0, 0xa8, 0x51, 0x62, 0xb8, 0x95, 0xdc, 0xc5, 0xfc, 0x6a, 0xf9, 0xc6, 0xe5, + 0x7a, 0x3a, 0x76, 0xeb, 0x69, 0xc7, 0x9a, 0x73, 0x82, 0xb2, 0xf8, 0x0e, 0x33, 0x46, 0x1e, 0x87, + 0xfa, 0x5f, 0x05, 0xcc, 0xae, 0x63, 0x62, 0x58, 0x66, 0x9b, 0xd0, 0x53, 0x58, 0xb4, 0x16, 0x28, + 0xb8, 0x36, 0xe9, 0x88, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xc0, 0x9d, 0xb6, 0x4d, 0x3a, 0xe1, 0x42, + 0xb1, 0x27, 0xc4, 0x8d, 0xe1, 0xbb, 0x60, 0xda, 0xa5, 0x98, 0xf6, 0x5d, 0xbe, 0x4c, 0xe5, 0x1b, + 0x2f, 0x1c, 0x4d, 0xc3, 0xa1, 0xcd, 0x79, 0x41, 0x34, 0xed, 0x3d, 0x23, 0x41, 0xa1, 0xfe, 0x23, + 0x07, 0x60, 0x80, 0x6d, 0x59, 0x66, 0x57, 0xa3, 0x2c, 0x7e, 0x6f, 0x81, 0x02, 0x1d, 0xd8, 0x84, + 0x4f, 
0xc3, 0x6c, 0xf3, 0xb2, 0xef, 0xc5, 0x9d, 0x81, 0x4d, 0x1e, 0x0f, 0x6b, 0xcb, 0x69, 0x0b, + 0xd6, 0x83, 0xb8, 0x0d, 0xdc, 0x08, 0xfc, 0xcb, 0x71, 0xeb, 0x9b, 0xf1, 0x57, 0x3f, 0x1e, 0xd6, + 0x24, 0x9b, 0x45, 0x3d, 0x60, 0x8a, 0x3b, 0x08, 0x0f, 0x01, 0xd4, 0xb1, 0x4b, 0xef, 0x38, 0xd8, + 0x74, 0xbd, 0x37, 0x69, 0x06, 0x11, 0x23, 0x7f, 0x69, 0xb2, 0xe5, 0x61, 0x16, 0xcd, 0xf3, 0xc2, + 0x0b, 0xb8, 0x91, 0x62, 0x43, 0x92, 0x37, 0xc0, 0xcb, 0x60, 0xda, 0x21, 0xd8, 0xb5, 0xcc, 0x4a, + 0x81, 0x8f, 0x22, 0x98, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0x2f, 0x82, 0x19, 0x83, 0xb8, 0x2e, + 0xee, 0x91, 0x4a, 0x91, 0x03, 0x17, 0x04, 0x70, 0x66, 0xd3, 0x6b, 0x46, 0x7e, 0xbf, 0xfa, 0x07, + 0x05, 0xcc, 0x05, 0x33, 0x77, 0x0a, 0xa9, 0xd2, 0x8c, 0xa7, 0xca, 0xf3, 0x47, 0xc6, 0x49, 0x46, + 0x86, 0x7c, 0x92, 0x8f, 0xf8, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x94, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, + 0x1c, 0xe1, 0xf3, 0x2b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xde, 0x16, 0xa6, 0xcd, 0x33, 0xcc, 0x69, + 0xff, 0x09, 0x05, 0x94, 0xf0, 0xc7, 0xa0, 0x44, 0x89, 0x61, 0xeb, 0x98, 0x12, 0x91, 0x26, 0xb1, + 0xf8, 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xac, 0xee, 0x1d, 0x01, 0xe3, 0x89, 0x12, 0xcc, 0x83, 0xdf, + 0x8a, 0x02, 0x1a, 0x78, 0x00, 0xe6, 0xfb, 0x76, 0x97, 0x21, 0x29, 0xdb, 0xba, 0x7b, 0x03, 0x11, + 0x3e, 0x57, 0x8f, 0x9c, 0x90, 0xdd, 0x98, 0x49, 0x73, 0x59, 0xbc, 0x60, 0x3e, 0xde, 0x8e, 0x12, + 0xd4, 0x70, 0x0d, 0x2c, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xda, 0xa4, 0x63, 0x99, 0x5d, 0x97, + 0x07, 0x50, 0xb1, 0xb9, 0x22, 0x08, 0x16, 0x36, 0xe3, 0xdd, 0x28, 0x89, 0x87, 0x1b, 0x60, 0xc9, + 0xdf, 0x67, 0x7f, 0xa8, 0xb9, 0xd4, 0x72, 0x06, 0x1b, 0x9a, 0xa1, 0xd1, 0xca, 0x34, 0xe7, 0xa9, + 0x8c, 0x86, 0xb5, 0x25, 0x24, 0xe9, 0x47, 0x52, 0x2b, 0xf5, 0x37, 0xd3, 0x60, 0x21, 0xb1, 0x1b, + 0xc0, 0xbb, 0x60, 0xb9, 0xd3, 0x77, 0x1c, 0x62, 0xd2, 0xad, 0xbe, 0xb1, 0x47, 0x9c, 0x76, 0x67, + 0x9f, 0x74, 0xfb, 0x3a, 0xe9, 0xf2, 0x15, 0x2d, 0x36, 0xab, 0xc2, 0xd7, 0xe5, 0x96, 0x14, 0x85, + 0x32, 0xac, 0xe1, 0x8f, 0x00, 0x34, 0x79, 0xd3, 0xa6, 0xe6, 0xba, 0x01, 0x67, 0x8e, 0x73, 0x06, + 0x09, 0xb8, 0x95, 0x42, 0x20, 0x89, 0x15, 0xf3, 0xb1, 0x4b, 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, + 0xf9, 0xb8, 0x8f, 0xeb, 0x52, 0x14, 0xca, 0xb0, 0x86, 0xaf, 0x82, 0xb2, 0xf7, 0x36, 0x3e, 0xe7, + 0x62, 0x71, 0x16, 0x05, 0x59, 0x79, 0x2b, 0xec, 0x42, 0x51, 0x1c, 0x1b, 0x9a, 0xb5, 0xe7, 0x12, + 0xe7, 0x90, 0x74, 0xdf, 0xf6, 0x34, 0x00, 0x2b, 0x94, 0x45, 0x5e, 0x28, 0x83, 0xa1, 0x6d, 0xa7, + 0x10, 0x48, 0x62, 0xc5, 0x86, 0xe6, 0x45, 0x4d, 0x6a, 0x68, 0xd3, 0xf1, 0xa1, 0xed, 0x4a, 0x51, + 0x28, 0xc3, 0x9a, 0xc5, 0x9e, 0xe7, 0xf2, 0xda, 0x21, 0xd6, 0x74, 0xbc, 0xa7, 0x93, 0xca, 0x4c, + 0x3c, 0xf6, 0xb6, 0xe2, 0xdd, 0x28, 0x89, 0x87, 0x6f, 0x83, 0x73, 0x5e, 0xd3, 0xae, 0x89, 0x03, + 0x92, 0x12, 0x27, 0x79, 0x4e, 0x90, 0x9c, 0xdb, 0x4a, 0x02, 0x50, 0xda, 0x06, 0xde, 0x02, 0xf3, + 0x1d, 0x4b, 0xd7, 0x79, 0x3c, 0xb6, 0xac, 0xbe, 0x49, 0x2b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0x6a, + 0xc5, 0x7a, 0x50, 0x02, 0x09, 0xef, 0x01, 0xd0, 0xf1, 0xcb, 0x81, 0x5b, 0x01, 0xd9, 0x85, 0x3e, + 0x5d, 0x87, 0xc2, 0x02, 0x1c, 0x34, 0xb9, 0x28, 0xc2, 0xa6, 0x7e, 0xa2, 0x80, 0x95, 0x8c, 0x1c, + 0x87, 0x6f, 0xc6, 0xaa, 0xde, 0xd5, 0x44, 0xd5, 0xbb, 0x90, 0x61, 0x16, 0x29, 0x7d, 0x1d, 0x30, + 0xc7, 0x74, 0x87, 0x66, 0xf6, 0x3c, 0x88, 0xd8, 0xc1, 0x5e, 0x92, 0xf9, 0x8e, 0xa2, 0xc0, 0x70, + 0x1b, 0x3e, 0x37, 0x1a, 0xd6, 0xe6, 0x62, 0x7d, 0x28, 0xce, 0xa9, 0xfe, 0x2a, 0x07, 0xc0, 0x3a, + 0xb1, 0x75, 0x6b, 0x60, 0x10, 0xf3, 0x34, 0x54, 0xcb, 0x7a, 0x4c, 0xb5, 0xa8, 0xd2, 0x85, 0x08, + 0xfc, 0xc9, 0x94, 0x2d, 0x1b, 
0x09, 0xd9, 0x72, 0x69, 0x0c, 0xcf, 0xd1, 0xba, 0xe5, 0x6f, 0x79, + 0xb0, 0x18, 0x82, 0x43, 0xe1, 0x72, 0x3b, 0xb6, 0x84, 0x57, 0x12, 0x4b, 0xb8, 0x22, 0x31, 0x79, + 0x6a, 0xca, 0xe5, 0x03, 0x30, 0xcf, 0x74, 0x85, 0xb7, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, + 0x04, 0x55, 0x67, 0x23, 0xc6, 0x84, 0x12, 0xcc, 0x19, 0x2a, 0x69, 0xe6, 0xeb, 0xa8, 0x92, 0xfe, + 0xa8, 0x80, 0xf9, 0x70, 0x99, 0x4e, 0x41, 0x26, 0xb5, 0xe2, 0x32, 0xa9, 0x7a, 0x74, 0x5c, 0x66, + 0xe8, 0xa4, 0xbf, 0x16, 0xa2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0xb6, 0xae, 0x75, 0xb0, + 0x2b, 0xca, 0xea, 0x19, 0xef, 0x30, 0xe5, 0xb5, 0xa1, 0xa0, 0x37, 0x26, 0xa9, 0x72, 0x4f, 0x57, + 0x52, 0xe5, 0xbf, 0x1c, 0x49, 0x75, 0x07, 0x94, 0x5c, 0x5f, 0x4c, 0x15, 0x38, 0xe5, 0xe5, 0x71, + 0xe9, 0x2c, 0x74, 0x54, 0xc0, 0x1a, 0x28, 0xa8, 0x80, 0x49, 0xa6, 0x9d, 0x8a, 0x5f, 0xa5, 0x76, + 0x62, 0xe1, 0x6d, 0xe3, 0xbe, 0x4b, 0xba, 0x3c, 0x95, 0x4a, 0x61, 0x78, 0xef, 0xf0, 0x56, 0x24, + 0x7a, 0xe1, 0x2e, 0x58, 0xb1, 0x1d, 0xab, 0xe7, 0x10, 0xd7, 0x5d, 0x27, 0xb8, 0xab, 0x6b, 0x26, + 0xf1, 0x07, 0xe0, 0x55, 0xbd, 0x0b, 0xa3, 0x61, 0x6d, 0x65, 0x47, 0x0e, 0x41, 0x59, 0xb6, 0xea, + 0xc7, 0x05, 0x70, 0x36, 0xb9, 0x23, 0x66, 0x08, 0x11, 0xe5, 0x44, 0x42, 0xe4, 0xe5, 0x48, 0x88, + 0x7a, 0x2a, 0x2d, 0x72, 0xe6, 0x4f, 0x85, 0xe9, 0x1a, 0x58, 0x10, 0xc2, 0xc3, 0xef, 0x14, 0x52, + 0x2c, 0x58, 0x9e, 0xdd, 0x78, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x30, 0xe7, 0x70, 0x6d, 0xe5, 0x13, + 0x78, 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0x45, 0x3b, 0x51, 0x1c, 0xcb, 0xb4, 0x49, 0x28, 0x39, + 0x7c, 0x82, 0x42, 0x5c, 0x9b, 0xac, 0x25, 0x01, 0x28, 0x6d, 0x03, 0x37, 0xc1, 0x62, 0xdf, 0x4c, + 0x53, 0x79, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0xa6, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x12, 0x93, + 0x2b, 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x34, 0xa9, + 0x8e, 0x52, 0x3f, 0x52, 0x00, 0x4c, 0xa7, 0xe0, 0xd8, 0xc3, 0x7d, 0xca, 0x22, 0x52, 0x22, 0xbb, + 0x72, 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x09, 0x77, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, + 0x98, 0x99, 0x40, 0xe2, 0x84, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf0, 0x1c, 0x2d, 0x71, 0xfe, 0x99, + 0x03, 0x8b, 0x21, 0x78, 0x62, 0x89, 0x23, 0x31, 0x79, 0x76, 0x39, 0x33, 0x99, 0xec, 0x08, 0xa7, + 0xee, 0xff, 0x44, 0x76, 0x84, 0x0e, 0x65, 0xc8, 0x8e, 0xdf, 0xe7, 0xa2, 0x5e, 0x1f, 0x53, 0x76, + 0x7c, 0x09, 0x57, 0x15, 0x5f, 0x3b, 0xe5, 0xa2, 0x7e, 0x9a, 0x07, 0x67, 0x93, 0x29, 0x18, 0xab, + 0x83, 0xca, 0xd8, 0x3a, 0xb8, 0x03, 0x96, 0xee, 0xf7, 0x75, 0x7d, 0xc0, 0xc7, 0x10, 0x29, 0x86, + 0x5e, 0x05, 0xfd, 0xb6, 0xb0, 0x5c, 0xfa, 0x81, 0x04, 0x83, 0xa4, 0x96, 0xe9, 0xb2, 0x58, 0x78, + 0xd2, 0xb2, 0x58, 0x3c, 0x41, 0x59, 0x94, 0x2b, 0x8b, 0xfc, 0x89, 0x94, 0xc5, 0xc4, 0x35, 0x51, + 0xb2, 0x5d, 0x8d, 0x3d, 0xc3, 0x8f, 0x14, 0xb0, 0x2c, 0x3f, 0x3e, 0x43, 0x1d, 0xcc, 0x1b, 0xf8, + 0x41, 0xf4, 0xf2, 0x62, 0x5c, 0xc1, 0xe8, 0x53, 0x4d, 0xaf, 0x7b, 0x5f, 0x77, 0xea, 0xef, 0x98, + 0x74, 0xdb, 0x69, 0x53, 0x47, 0x33, 0x7b, 0x5e, 0x81, 0xdd, 0x8c, 0x71, 0xa1, 0x04, 0x37, 0xbc, + 0x07, 0x4a, 0x06, 0x7e, 0xd0, 0xee, 0x3b, 0x3d, 0xbf, 0x10, 0x1e, 0xff, 0x3d, 0x3c, 0xf6, 0x37, + 0x05, 0x0b, 0x0a, 0xf8, 0xd4, 0x2f, 0x14, 0xb0, 0x92, 0x51, 0x41, 0xbf, 0x41, 0xa3, 0xfc, 0x58, + 0x01, 0x17, 0x63, 0xa3, 0x64, 0x19, 0x49, 0xee, 0xf7, 0x75, 0x9e, 0x9c, 0x42, 0xb0, 0x5c, 0x05, + 0xb3, 0x36, 0x76, 0xa8, 0x16, 0x28, 0xdd, 0x62, 0x73, 0x6e, 0x34, 0xac, 0xcd, 0xee, 0xf8, 0x8d, + 0x28, 0xec, 0x97, 0xcc, 0x4d, 0xee, 0xe9, 0xcd, 0x8d, 0xfa, 0xeb, 0x1c, 0x28, 0x47, 0x5c, 0x3e, + 0x05, 0xa9, 0xf2, 0x56, 0x4c, 0xaa, 0x48, 0x3f, 0xfe, 
0x44, 0xe7, 0x30, 0x4b, 0xab, 0x6c, 0x26, + 0xb4, 0xca, 0x77, 0xc7, 0x11, 0x1d, 0x2d, 0x56, 0xfe, 0x95, 0x03, 0x4b, 0x11, 0x74, 0xa8, 0x56, + 0xbe, 0x1f, 0x53, 0x2b, 0xab, 0x09, 0xb5, 0x52, 0x91, 0xd9, 0x3c, 0x93, 0x2b, 0xe3, 0xe5, 0xca, + 0x9f, 0x14, 0xb0, 0x10, 0x99, 0xbb, 0x53, 0xd0, 0x2b, 0xeb, 0x71, 0xbd, 0x52, 0x1b, 0x13, 0x2f, + 0x19, 0x82, 0xe5, 0x16, 0x58, 0x8c, 0x80, 0xb6, 0x9d, 0xae, 0x66, 0x62, 0xdd, 0x85, 0x2f, 0x80, + 0xa2, 0x4b, 0xb1, 0x43, 0xfd, 0xec, 0xf6, 0x6d, 0xdb, 0xac, 0x11, 0x79, 0x7d, 0xea, 0xbf, 0x15, + 0xd0, 0x88, 0x18, 0xef, 0x10, 0xc7, 0xd5, 0x5c, 0x4a, 0x4c, 0x7a, 0xd7, 0xd2, 0xfb, 0x06, 0x69, + 0xe9, 0x58, 0x33, 0x10, 0x61, 0x0d, 0x9a, 0x65, 0xee, 0x58, 0xba, 0xd6, 0x19, 0x40, 0x0c, 0xca, + 0x1f, 0xee, 0x13, 0x73, 0x9d, 0xe8, 0x84, 0x8a, 0xcf, 0x1b, 0xb3, 0xcd, 0x37, 0xfd, 0xdb, 0xfe, + 0xf7, 0xc2, 0xae, 0xc7, 0xc3, 0xda, 0xea, 0x24, 0x8c, 0x3c, 0x38, 0xa3, 0x9c, 0xf0, 0x67, 0x00, + 0xb0, 0xc7, 0x76, 0x07, 0xfb, 0x1f, 0x3b, 0x66, 0x9b, 0x6f, 0xf8, 0x29, 0xfc, 0x5e, 0xd0, 0x73, + 0xac, 0x17, 0x44, 0x18, 0xd5, 0xdf, 0x95, 0x62, 0x4b, 0xfd, 0x8d, 0xbf, 0x5b, 0xfa, 0x05, 0x58, + 0x3a, 0x0c, 0x67, 0xc7, 0x07, 0x30, 0x4d, 0xc4, 0xe2, 0xee, 0x45, 0x29, 0xbd, 0x6c, 0x5e, 0x43, + 0x25, 0x76, 0x57, 0x42, 0x87, 0xa4, 0x2f, 0x81, 0xaf, 0x82, 0x32, 0xd3, 0x32, 0x5a, 0x87, 0x6c, + 0x61, 0xc3, 0x4f, 0xc3, 0xe0, 0xeb, 0x50, 0x3b, 0xec, 0x42, 0x51, 0x1c, 0xdc, 0x07, 0x8b, 0xb6, + 0xd5, 0xdd, 0xc4, 0x26, 0xee, 0x11, 0x56, 0xa1, 0xbd, 0xa5, 0xe4, 0xb7, 0x4e, 0xb3, 0xcd, 0xd7, + 0xfc, 0x1b, 0x85, 0x9d, 0x34, 0x84, 0x9d, 0xd8, 0x24, 0xcd, 0x3c, 0x08, 0x64, 0x94, 0xd0, 0x48, + 0x7d, 0xcc, 0x9c, 0x49, 0xfd, 0x03, 0x44, 0x96, 0x8f, 0x27, 0xfc, 0x9c, 0x99, 0x75, 0x9f, 0x56, + 0x3a, 0xd1, 0x7d, 0x9a, 0xe4, 0xc4, 0x31, 0x7b, 0xcc, 0x13, 0xc7, 0xa7, 0x0a, 0xb8, 0x64, 0x4f, + 0x90, 0x46, 0x15, 0xc0, 0xa7, 0xa5, 0x35, 0x66, 0x5a, 0x26, 0xc9, 0xc8, 0xe6, 0xea, 0x68, 0x58, + 0xbb, 0x34, 0x09, 0x12, 0x4d, 0xe4, 0x1a, 0x4b, 0x1a, 0x4b, 0xec, 0x7c, 0x95, 0x32, 0x77, 0xf3, + 0xca, 0x18, 0x37, 0xfd, 0x8d, 0xd2, 0xcb, 0x43, 0xff, 0x09, 0x05, 0x34, 0xea, 0x47, 0x45, 0x70, + 0x2e, 0x55, 0xad, 0xbf, 0xc2, 0xbb, 0xc2, 0xd4, 0x89, 0x26, 0x7f, 0x8c, 0x13, 0xcd, 0x1a, 0x58, + 0x10, 0x1f, 0x98, 0x13, 0x07, 0xa2, 0x20, 0x4c, 0x5a, 0xf1, 0x6e, 0x94, 0xc4, 0xcb, 0xee, 0x2a, + 0x8b, 0xc7, 0xbc, 0xab, 0x8c, 0x7a, 0x21, 0xfe, 0x17, 0xe5, 0xe5, 0x73, 0xda, 0x0b, 0xf1, 0xf7, + 0xa8, 0x24, 0x1e, 0xbe, 0xe1, 0x27, 0x6b, 0xc0, 0x30, 0xc3, 0x19, 0x12, 0xd9, 0x17, 0x10, 0x24, + 0xd0, 0x4f, 0xf4, 0x11, 0xf5, 0x7d, 0xc9, 0x47, 0xd4, 0xd5, 0x31, 0x61, 0x36, 0xf9, 0xb5, 0xa4, + 0xf4, 0xd0, 0x59, 0x3e, 0xfe, 0xa1, 0x53, 0xfd, 0x8b, 0x02, 0x9e, 0xcb, 0xdc, 0xa6, 0xe0, 0x5a, + 0x4c, 0x3d, 0x5e, 0x4b, 0xa8, 0xc7, 0xe7, 0x33, 0x0d, 0x23, 0x12, 0xd2, 0x90, 0xdf, 0x58, 0xde, + 0x1c, 0x7b, 0x63, 0x29, 0x39, 0x89, 0x8c, 0xbf, 0xba, 0x6c, 0xbe, 0xfe, 0xf0, 0x51, 0x75, 0xea, + 0xb3, 0x47, 0xd5, 0xa9, 0xcf, 0x1f, 0x55, 0xa7, 0x7e, 0x39, 0xaa, 0x2a, 0x0f, 0x47, 0x55, 0xe5, + 0xb3, 0x51, 0x55, 0xf9, 0x7c, 0x54, 0x55, 0xfe, 0x3e, 0xaa, 0x2a, 0xbf, 0xfd, 0xa2, 0x3a, 0x75, + 0x0f, 0xa6, 0xff, 0x95, 0xf9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xae, 0x39, 0x4c, 0x13, 0xc3, + 0x29, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2341,6 +2374,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2394,6 +2453,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -3054,6 +3125,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -3102,6 +3183,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3516,6 +3601,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { if this == nil { return "nil" @@ -3547,6 +3642,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -7626,6 +7722,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8064,6 +8229,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 1bdc400b9..a7a7e7c54 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -127,6 +127,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -277,6 +278,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -449,7 +451,7 @@ message ReplicaSetSpec { // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -513,7 +515,6 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. 
// +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -570,8 +571,9 @@ message RollingUpdateStatefulSetStrategy { // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { @@ -622,6 +624,21 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { @@ -657,7 +674,10 @@ message StatefulSetSpec { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -702,7 +722,6 @@ message StatefulSetSpec { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -714,6 +733,14 @@ message StatefulSetSpec { // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, // which is alpha. +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -758,7 +785,6 @@ message StatefulSetStatus { repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. 
- // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 28474be7f..644d368fe 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -29,6 +29,7 @@ const ( DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" + PodIndexLabel = "apps.kubernetes.io/pod-index" ) // +genclient @@ -39,8 +40,9 @@ const ( // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { @@ -162,6 +164,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -180,7 +197,10 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -225,7 +245,6 @@ type StatefulSetSpec struct { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. 
// +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -237,6 +256,14 @@ type StatefulSetSpec struct { // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, // which is alpha. +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -281,7 +308,6 @@ type StatefulSetStatus struct { Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -355,6 +381,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -598,7 +625,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -615,6 +641,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` @@ -833,7 +860,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index f00d10aea..6676da064 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -248,7 +248,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. 
This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { @@ -291,7 +291,7 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", @@ -324,6 +324,15 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. 
Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", @@ -338,14 +347,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. 
The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -363,7 +373,7 @@ var map_StatefulSetStatus = map[string]string{ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 72d76670e..6912986ac 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -693,6 +693,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. +func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -741,6 +757,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 48ac988f4..2f1e7c00a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -553,12 +553,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} + return fileDescriptor_2a07313e8f66e805, []int{19} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +614,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} + return fileDescriptor_2a07313e8f66e805, []int{20} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -614,7 +642,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{21} } func (m *StatefulSetStatus) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -642,7 +670,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{21} + return fileDescriptor_2a07313e8f66e805, []int{22} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -688,6 +716,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta1.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") @@ -699,131 +728,135 @@ func init() { } var fileDescriptor_2a07313e8f66e805 = []byte{ - // 1982 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, - 0xf5, 0xd7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0xd2, 0xd7, 0x62, 0x94, 0x6f, 0x49, 0x81, - 0x35, 0x12, 0xe5, 0x87, 0x97, 0xb1, 0x92, 0x06, 0x89, 0xdd, 0xba, 0x15, 0x25, 0x37, 0x76, 0x20, - 0xc5, 0xca, 0x48, 0x8a, 0xd1, 0xf4, 0x07, 0x32, 0x24, 0xc7, 0xd4, 0x46, 0xfb, 0x0b, 0xbb, 0x43, - 0xc6, 0x44, 0x2f, 0xfd, 0x03, 0x0a, 0xa4, 0xe7, 0xfe, 0x15, 0xbd, 0xb5, 0x68, 0xd1, 0x4b, 0x0f, - 0x85, 0x8f, 0x41, 0x2f, 0x4d, 0x2f, 0x44, 0xcd, 0x5c, 0xdb, 0x5b, 0x7b, 0x31, 0x50, 0xa0, 0x98, - 0xd9, 0xd9, 0xdf, 0xbb, 0xd2, 0xb2, 0x80, 0x05, 0xb4, 0x37, 0xed, 0xbc, 0xf7, 0x3e, 0x6f, 0xe6, - 0xcd, 0x7b, 0x6f, 0xde, 0x87, 0x82, 0xef, 0x9d, 0xbd, 0xeb, 0xaa, 0x9a, 0xd5, 0x3a, 0x1b, 0x74, - 0xa8, 0x63, 0x52, 0x46, 0xdd, 0xd6, 0x90, 0x9a, 0x3d, 0xcb, 0x69, 0x49, 0x01, 0xb1, 0xb5, 0x16, - 0xb1, 0x6d, 0xb7, 0x35, 0xbc, 0xd9, 0xa1, 0x8c, 0xdc, 0x6c, 0xf5, 0xa9, 0x49, 0x1d, 0xc2, 0x68, - 0x4f, 0xb5, 0x1d, 0x8b, 0x59, 0x68, 0xdd, 0x53, 0x54, 0x89, 0xad, 0xa9, 0x5c, 0x51, 0x95, 0x8a, - 0x1b, 0x37, 0xfa, 0x1a, 0x3b, 0x1d, 0x74, 0xd4, 0xae, 0x65, 0xb4, 0xfa, 0x56, 0xdf, 0x6a, 0x09, - 0xfd, 0xce, 0xe0, 0x91, 0xf8, 0x12, 0x1f, 0xe2, 0x2f, 0x0f, 0x67, 0xa3, 0x19, 0x71, 0xd8, 0xb5, - 0x1c, 0xda, 0x1a, 0xa6, 0x7c, 0x6d, 0xbc, 0x1d, 0xea, 0x18, 0xa4, 0x7b, 0xaa, 0x99, 0xd4, 0x19, - 0xb5, 0xec, 0xb3, 0x3e, 0x5f, 0x70, 0x5b, 0x06, 0x65, 0x24, 0xcb, 0xaa, 0x95, 0x67, 0xe5, 0x0c, - 0x4c, 0xa6, 0x19, 0x34, 0x65, 0xf0, 0xce, 0x45, 0x06, 0x6e, 0xf7, 0x94, 0x1a, 0x24, 0x65, 0xf7, - 0x56, 0x9e, 0xdd, 0x80, 0x69, 0x7a, 0x4b, 0x33, 0x99, 0xcb, 0x9c, 0xa4, 0x51, 0xf3, 0x9f, 0x0a, - 0xa0, 0x5d, 0xcb, 0x64, 0x8e, 0xa5, 0xeb, 0xd4, 0xc1, 0x74, 0xa8, 0xb9, 0x9a, 0x65, 0xa2, 0x4f, - 0xa1, 0xc2, 0xcf, 0xd3, 0x23, 0x8c, 0xd4, 0x94, 0x4d, 0x65, 0x6b, 0x69, 0xfb, 0x4d, 0x35, 0x8c, - 0x74, 0x00, 0xaf, 0xda, 0x67, 0x7d, 0xbe, 0xe0, 0xaa, 0x5c, 0x5b, 0x1d, 0xde, 0x54, 0x1f, 0x74, - 0x3e, 0xa3, 0x5d, 0x76, 0x40, 0x19, 0x69, 0xa3, 0x27, 0xe3, 0xc6, 0xcc, 0x64, 0xdc, 0x80, 0x70, - 0x0d, 0x07, 0xa8, 0xe8, 0x01, 0xcc, 0x09, 0xf4, 0x92, 0x40, 0xbf, 0x91, 0x8b, 0x2e, 0x0f, 0xad, - 0x62, 
0xf2, 0xf9, 0xdd, 0xc7, 0x8c, 0x9a, 0x7c, 0x7b, 0xed, 0x17, 0x24, 0xf4, 0xdc, 0x1e, 0x61, - 0x04, 0x0b, 0x20, 0xf4, 0x06, 0x54, 0x1c, 0xb9, 0xfd, 0xda, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbe, - 0x22, 0xb5, 0x2a, 0xfe, 0xb1, 0x70, 0xa0, 0xd1, 0x7c, 0xa2, 0xc0, 0xb5, 0xf4, 0xb9, 0xf7, 0x35, - 0x97, 0xa1, 0x1f, 0xa5, 0xce, 0xae, 0x16, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x0f, 0x1c, 0xfb, 0x2b, - 0x91, 0x73, 0x1f, 0x42, 0x59, 0x63, 0xd4, 0x70, 0x6b, 0xa5, 0xcd, 0xd9, 0xad, 0xa5, 0xed, 0xd7, - 0xd5, 0x9c, 0x04, 0x56, 0xd3, 0xbb, 0x6b, 0x2f, 0x4b, 0xdc, 0xf2, 0x7d, 0x8e, 0x80, 0x3d, 0xa0, - 0xe6, 0xcf, 0x4b, 0x00, 0x7b, 0xd4, 0xd6, 0xad, 0x91, 0x41, 0x4d, 0x76, 0x09, 0x57, 0x77, 0x1f, - 0xe6, 0x5c, 0x9b, 0x76, 0xe5, 0xd5, 0xbd, 0x92, 0x7b, 0x82, 0x70, 0x53, 0x47, 0x36, 0xed, 0x86, - 0x97, 0xc6, 0xbf, 0xb0, 0x80, 0x40, 0x1f, 0xc1, 0xbc, 0xcb, 0x08, 0x1b, 0xb8, 0xe2, 0xca, 0x96, - 0xb6, 0x5f, 0x2d, 0x02, 0x26, 0x0c, 0xda, 0x55, 0x09, 0x37, 0xef, 0x7d, 0x63, 0x09, 0xd4, 0xfc, - 0xf3, 0x2c, 0xac, 0x86, 0xca, 0xbb, 0x96, 0xd9, 0xd3, 0x18, 0x4f, 0xe9, 0xdb, 0x30, 0xc7, 0x46, - 0x36, 0x15, 0x31, 0x59, 0x6c, 0xbf, 0xe2, 0x6f, 0xe6, 0x78, 0x64, 0xd3, 0x67, 0xe3, 0xc6, 0x7a, - 0x86, 0x09, 0x17, 0x61, 0x61, 0x84, 0xf6, 0x83, 0x7d, 0x96, 0x84, 0xf9, 0xdb, 0x71, 0xe7, 0xcf, - 0xc6, 0x8d, 0x8c, 0x06, 0xa2, 0x06, 0x48, 0xf1, 0x2d, 0xa2, 0xcf, 0xa0, 0xaa, 0x13, 0x97, 0x9d, - 0xd8, 0x3d, 0xc2, 0xe8, 0xb1, 0x66, 0xd0, 0xda, 0xbc, 0x38, 0xfd, 0x6b, 0xc5, 0x2e, 0x8a, 0x5b, - 0xb4, 0xaf, 0xc9, 0x1d, 0x54, 0xf7, 0x63, 0x48, 0x38, 0x81, 0x8c, 0x86, 0x80, 0xf8, 0xca, 0xb1, - 0x43, 0x4c, 0xd7, 0x3b, 0x15, 0xf7, 0xb7, 0x30, 0xb5, 0xbf, 0x0d, 0xe9, 0x0f, 0xed, 0xa7, 0xd0, - 0x70, 0x86, 0x07, 0xf4, 0x32, 0xcc, 0x3b, 0x94, 0xb8, 0x96, 0x59, 0x9b, 0x13, 0x11, 0x0b, 0xae, - 0x0b, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x55, 0x58, 0x30, 0xa8, 0xeb, 0x92, 0x3e, 0xad, 0x95, 0x85, - 0xe2, 0x8a, 0x54, 0x5c, 0x38, 0xf0, 0x96, 0xb1, 0x2f, 0x6f, 0xfe, 0x46, 0x81, 0x6a, 0x78, 0x4d, - 0x97, 0x50, 0xab, 0xf7, 0xe2, 0xb5, 0xfa, 0xcd, 0x02, 0xc9, 0x99, 0x53, 0xa3, 0x7f, 0x2b, 0x01, - 0x0a, 0x95, 0xb0, 0xa5, 0xeb, 0x1d, 0xd2, 0x3d, 0x43, 0x9b, 0x30, 0x67, 0x12, 0xc3, 0xcf, 0xc9, - 0xa0, 0x40, 0x3e, 0x24, 0x06, 0xc5, 0x42, 0x82, 0xbe, 0x50, 0x00, 0x0d, 0xc4, 0x6d, 0xf6, 0x76, - 0x4c, 0xd3, 0x62, 0x84, 0x07, 0xd8, 0xdf, 0xd0, 0x6e, 0x81, 0x0d, 0xf9, 0xbe, 0xd4, 0x93, 0x14, - 0xca, 0x5d, 0x93, 0x39, 0xa3, 0xf0, 0x62, 0xd3, 0x0a, 0x38, 0xc3, 0x35, 0xfa, 0x21, 0x80, 0x23, - 0x31, 0x8f, 0x2d, 0x59, 0xb6, 0xf9, 0x3d, 0xc0, 0x77, 0xbf, 0x6b, 0x99, 0x8f, 0xb4, 0x7e, 0xd8, - 0x58, 0x70, 0x00, 0x81, 0x23, 0x70, 0x1b, 0x77, 0x61, 0x3d, 0x67, 0x9f, 0xe8, 0x0a, 0xcc, 0x9e, - 0xd1, 0x91, 0x17, 0x2a, 0xcc, 0xff, 0x44, 0x6b, 0x50, 0x1e, 0x12, 0x7d, 0x40, 0xbd, 0x9a, 0xc4, - 0xde, 0xc7, 0xad, 0xd2, 0xbb, 0x4a, 0xf3, 0x57, 0xe5, 0x68, 0xa6, 0xf0, 0x7e, 0x83, 0xb6, 0xf8, - 0xf3, 0x60, 0xeb, 0x5a, 0x97, 0xb8, 0x02, 0xa3, 0xdc, 0x7e, 0xc1, 0x7b, 0x1a, 0xbc, 0x35, 0x1c, - 0x48, 0xd1, 0x8f, 0xa1, 0xe2, 0x52, 0x9d, 0x76, 0x99, 0xe5, 0xc8, 0x16, 0xf7, 0x56, 0xc1, 0x9c, - 0x22, 0x1d, 0xaa, 0x1f, 0x49, 0x53, 0x0f, 0xde, 0xff, 0xc2, 0x01, 0x24, 0xfa, 0x08, 0x2a, 0x8c, - 0x1a, 0xb6, 0x4e, 0x18, 0x95, 0xd1, 0x8b, 0xe5, 0x15, 0xef, 0x1d, 0x1c, 0xec, 0xd0, 0xea, 0x1d, - 0x4b, 0x35, 0xd1, 0x3d, 0x83, 0x3c, 0xf5, 0x57, 0x71, 0x00, 0x83, 0x7e, 0x00, 0x15, 0x97, 0xf1, - 0x57, 0xbd, 0x3f, 0x12, 0xd5, 0x76, 0xde, 0xb3, 0x12, 0xed, 0xa3, 0x9e, 0x49, 0x08, 0xed, 0xaf, - 0xe0, 0x00, 0x0e, 0xed, 0xc0, 0x8a, 0xa1, 0x99, 0x98, 0x92, 0xde, 0xe8, 0x88, 0x76, 0x2d, 0xb3, - 0xe7, 0x8a, 0x32, 0x2d, 0xb7, 
0xd7, 0xa5, 0xd1, 0xca, 0x41, 0x5c, 0x8c, 0x93, 0xfa, 0x68, 0x1f, - 0xd6, 0xfc, 0x67, 0xf7, 0x9e, 0xe6, 0x32, 0xcb, 0x19, 0xed, 0x6b, 0x86, 0xc6, 0x44, 0xcf, 0x2b, - 0xb7, 0x6b, 0x93, 0x71, 0x63, 0x0d, 0x67, 0xc8, 0x71, 0xa6, 0x15, 0xef, 0x2b, 0x36, 0x19, 0xb8, - 0xb4, 0x27, 0x7a, 0x58, 0x25, 0xec, 0x2b, 0x87, 0x62, 0x15, 0x4b, 0x29, 0x7a, 0x18, 0x4b, 0xd3, - 0xca, 0x74, 0x69, 0x5a, 0xcd, 0x4f, 0x51, 0x74, 0x02, 0xeb, 0xb6, 0x63, 0xf5, 0x1d, 0xea, 0xba, - 0x7b, 0x94, 0xf4, 0x74, 0xcd, 0xa4, 0x7e, 0x64, 0x16, 0xc5, 0x89, 0x5e, 0x9a, 0x8c, 0x1b, 0xeb, - 0x87, 0xd9, 0x2a, 0x38, 0xcf, 0xb6, 0xf9, 0x87, 0x39, 0xb8, 0x92, 0x7c, 0xe3, 0xd0, 0x07, 0x80, - 0xac, 0x8e, 0x4b, 0x9d, 0x21, 0xed, 0xbd, 0xef, 0x0d, 0x6e, 0x7c, 0xba, 0x51, 0xc4, 0x74, 0x13, - 0xd4, 0xed, 0x83, 0x94, 0x06, 0xce, 0xb0, 0xf2, 0xe6, 0x23, 0x59, 0x00, 0x25, 0xb1, 0xd1, 0xc8, - 0x7c, 0x94, 0x2a, 0x82, 0x1d, 0x58, 0x91, 0xb5, 0xef, 0x0b, 0x45, 0xb2, 0x46, 0xee, 0xfd, 0x24, - 0x2e, 0xc6, 0x49, 0x7d, 0x74, 0x1b, 0x96, 0x1d, 0x9e, 0x07, 0x01, 0xc0, 0x82, 0x00, 0xf8, 0x3f, - 0x09, 0xb0, 0x8c, 0xa3, 0x42, 0x1c, 0xd7, 0x45, 0xef, 0xc3, 0x55, 0x32, 0x24, 0x9a, 0x4e, 0x3a, - 0x3a, 0x0d, 0x00, 0xe6, 0x04, 0xc0, 0x8b, 0x12, 0xe0, 0xea, 0x4e, 0x52, 0x01, 0xa7, 0x6d, 0xd0, - 0x01, 0xac, 0x0e, 0xcc, 0x34, 0x94, 0x97, 0xc4, 0x2f, 0x49, 0xa8, 0xd5, 0x93, 0xb4, 0x0a, 0xce, - 0xb2, 0x43, 0x9f, 0x02, 0x74, 0xfd, 0x57, 0xdd, 0xad, 0xcd, 0x8b, 0x36, 0xfc, 0x46, 0x81, 0x62, - 0x0b, 0x46, 0x81, 0xb0, 0x05, 0x06, 0x4b, 0x2e, 0x8e, 0x60, 0xa2, 0x5b, 0x50, 0xed, 0x5a, 0xba, - 0x2e, 0x32, 0x7f, 0xd7, 0x1a, 0x98, 0x4c, 0x24, 0x6f, 0xb9, 0x8d, 0xf8, 0x63, 0xbf, 0x1b, 0x93, - 0xe0, 0x84, 0x66, 0xf3, 0x77, 0x4a, 0xf4, 0x99, 0xf1, 0xcb, 0x19, 0xdd, 0x8a, 0x8d, 0x3e, 0x2f, - 0x27, 0x46, 0x9f, 0x6b, 0x69, 0x8b, 0xc8, 0xe4, 0xa3, 0xc1, 0x32, 0x4f, 0x7e, 0xcd, 0xec, 0x7b, - 0x17, 0x2e, 0x5b, 0xe2, 0x9b, 0xe7, 0x96, 0x52, 0xa0, 0x1d, 0x79, 0x18, 0xaf, 0x8a, 0x3b, 0x8f, - 0x0a, 0x71, 0x1c, 0xb9, 0x79, 0x07, 0xaa, 0xf1, 0x3a, 0x8c, 0xcd, 0xf4, 0xca, 0x85, 0x33, 0xfd, - 0xd7, 0x0a, 0xac, 0xe7, 0x78, 0x47, 0x3a, 0x54, 0x0d, 0xf2, 0x38, 0x72, 0xcd, 0x17, 0xce, 0xc6, - 0x9c, 0x35, 0xa9, 0x1e, 0x6b, 0x52, 0xef, 0x9b, 0xec, 0x81, 0x73, 0xc4, 0x1c, 0xcd, 0xec, 0x7b, - 0xf7, 0x70, 0x10, 0xc3, 0xc2, 0x09, 0x6c, 0xf4, 0x09, 0x54, 0x0c, 0xf2, 0xf8, 0x68, 0xe0, 0xf4, - 0xb3, 0xe2, 0x55, 0xcc, 0x8f, 0x78, 0x3f, 0x0e, 0x24, 0x0a, 0x0e, 0xf0, 0x9a, 0xbf, 0x57, 0x60, - 0x33, 0x76, 0x4a, 0xde, 0x2b, 0xe8, 0xa3, 0x81, 0x7e, 0x44, 0xc3, 0x1b, 0x7f, 0x1d, 0x16, 0x6d, - 0xe2, 0x30, 0x2d, 0xe8, 0x17, 0xe5, 0xf6, 0xf2, 0x64, 0xdc, 0x58, 0x3c, 0xf4, 0x17, 0x71, 0x28, - 0xcf, 0x88, 0x4d, 0xe9, 0xf9, 0xc5, 0xa6, 0xf9, 0x2f, 0x05, 0xca, 0x47, 0x5d, 0xa2, 0xd3, 0x4b, - 0x60, 0x2a, 0x7b, 0x31, 0xa6, 0xd2, 0xcc, 0xcd, 0x59, 0xb1, 0x9f, 0x5c, 0x92, 0xb2, 0x9f, 0x20, - 0x29, 0xd7, 0x2f, 0xc0, 0x39, 0x9f, 0x9f, 0xbc, 0x07, 0x8b, 0x81, 0xbb, 0x58, 0x53, 0x56, 0x2e, - 0x6a, 0xca, 0xcd, 0x5f, 0x96, 0x60, 0x29, 0xe2, 0x62, 0x3a, 0x6b, 0x1e, 0xee, 0xc8, 0x5c, 0xc3, - 0x1b, 0xd7, 0x76, 0x91, 0x83, 0xa8, 0xfe, 0x0c, 0xe3, 0x8d, 0x8b, 0xe1, 0xb0, 0x90, 0x1e, 0x6d, - 0xee, 0x40, 0x95, 0x11, 0xa7, 0x4f, 0x99, 0x2f, 0x13, 0x01, 0x5b, 0x0c, 0xb9, 0xca, 0x71, 0x4c, - 0x8a, 0x13, 0xda, 0x1b, 0xb7, 0x61, 0x39, 0xe6, 0x6c, 0xaa, 0x99, 0xef, 0x0b, 0x1e, 0x9c, 0xb0, - 0x14, 0x2e, 0x21, 0xbb, 0x3e, 0x88, 0x65, 0xd7, 0x56, 0x7e, 0x30, 0x23, 0x05, 0x9a, 0x97, 0x63, - 0x38, 0x91, 0x63, 0xaf, 0x15, 0x42, 0x3b, 0x3f, 0xd3, 0xfe, 0x5e, 0x82, 0xb5, 0x88, 0x76, 0x48, - 0x85, 0xbf, 0x1d, 0x7b, 0x0f, 0xb6, 0x12, 0xef, 0x41, 
0x2d, 0xcb, 0xe6, 0xb9, 0x71, 0xe1, 0x6c, - 0x7e, 0x3a, 0xfb, 0xdf, 0xc8, 0x4f, 0x7f, 0xab, 0xc0, 0x4a, 0x24, 0x76, 0x97, 0x40, 0x50, 0xef, - 0xc7, 0x09, 0xea, 0xf5, 0x22, 0x49, 0x93, 0xc3, 0x50, 0xff, 0xa1, 0x40, 0x2b, 0xa2, 0x75, 0x48, - 0x1d, 0x57, 0x73, 0x19, 0x35, 0xd9, 0xc7, 0x96, 0x3e, 0x30, 0xe8, 0xae, 0x4e, 0x34, 0x03, 0x53, - 0xbe, 0xa0, 0x59, 0xe6, 0xa1, 0xa5, 0x6b, 0xdd, 0x11, 0x22, 0xb0, 0xf4, 0xf9, 0x29, 0x35, 0xf7, - 0xa8, 0x4e, 0x19, 0xed, 0xc9, 0x74, 0xfa, 0xae, 0x84, 0x5f, 0x7a, 0x18, 0x8a, 0x9e, 0x8d, 0x1b, - 0x5b, 0x45, 0x10, 0x45, 0x96, 0x45, 0x31, 0xd1, 0x4f, 0x00, 0xf8, 0xa7, 0xe8, 0x47, 0x3d, 0x99, - 0x70, 0x77, 0xfc, 0xaa, 0x7c, 0x18, 0x48, 0xa6, 0x72, 0x10, 0x41, 0x6c, 0xfe, 0x65, 0x21, 0x76, - 0x67, 0xff, 0xf3, 0x54, 0xf1, 0xa7, 0xb0, 0x36, 0x0c, 0xa3, 0xe3, 0x2b, 0xf0, 0xd1, 0x7a, 0x36, - 0xf9, 0xf3, 0x5b, 0x00, 0x9f, 0x15, 0xd7, 0xf6, 0xff, 0x4b, 0x27, 0x6b, 0x1f, 0x67, 0xc0, 0xe1, - 0x4c, 0x27, 0xe8, 0x5b, 0xb0, 0xc4, 0x69, 0x89, 0xd6, 0xa5, 0x1f, 0x12, 0xc3, 0xaf, 0xa7, 0x55, - 0x3f, 0x5f, 0x8e, 0x42, 0x11, 0x8e, 0xea, 0xa1, 0x53, 0x58, 0xb5, 0xad, 0xde, 0x01, 0x31, 0x49, - 0x9f, 0xf2, 0x61, 0xce, 0xbb, 0x4a, 0xc1, 0x1f, 0x17, 0xdb, 0xef, 0xf8, 0x23, 0xfc, 0x61, 0x5a, - 0xe5, 0x19, 0x27, 0x62, 0xe9, 0x65, 0x91, 0x04, 0x59, 0x90, 0xc8, 0x81, 0xea, 0x40, 0xce, 0x54, - 0x92, 0x4e, 0x7b, 0x3f, 0x94, 0x6d, 0x17, 0x29, 0xac, 0x93, 0x98, 0x65, 0xf8, 0xe8, 0xc5, 0xd7, - 0x71, 0xc2, 0x43, 0x2e, 0x3d, 0xae, 0xfc, 0x47, 0xf4, 0x38, 0x83, 0xaf, 0x2f, 0x4e, 0xc9, 0xd7, - 0xff, 0xa8, 0xc0, 0x75, 0xbb, 0x40, 0x2d, 0xd5, 0x40, 0xc4, 0xe6, 0x5e, 0x91, 0xd8, 0x14, 0xa9, - 0xcd, 0xf6, 0xd6, 0x64, 0xdc, 0xb8, 0x5e, 0x44, 0x13, 0x17, 0xda, 0x5f, 0xf3, 0xd7, 0x65, 0xb8, - 0x9a, 0x7a, 0x2d, 0xd1, 0xf7, 0xcf, 0xe1, 0xd4, 0xd7, 0x9e, 0x1b, 0x9f, 0x4e, 0x91, 0xe1, 0xd9, - 0x29, 0xc8, 0xf0, 0x0e, 0xac, 0x74, 0x07, 0x8e, 0x43, 0x4d, 0x96, 0xa0, 0xc2, 0xc1, 0xa5, 0xee, - 0xc6, 0xc5, 0x38, 0xa9, 0x9f, 0xc5, 0xe7, 0xcb, 0x53, 0xf2, 0xf9, 0xe8, 0x2e, 0x24, 0x27, 0xf3, - 0x4a, 0x30, 0xbd, 0x0b, 0x49, 0xcd, 0x92, 0xfa, 0x7c, 0x40, 0xf4, 0x50, 0x03, 0x84, 0x85, 0xf8, - 0x80, 0x78, 0x12, 0x93, 0xe2, 0x84, 0x76, 0x06, 0x37, 0x5e, 0x2c, 0xca, 0x8d, 0x11, 0x89, 0x31, - 0x77, 0x10, 0xfd, 0xee, 0x46, 0x91, 0xdc, 0x2d, 0x4e, 0xdd, 0x33, 0x7f, 0xb4, 0x58, 0x9a, 0xfe, - 0x47, 0x8b, 0xe6, 0x9f, 0x14, 0x78, 0x31, 0xb7, 0xb3, 0xa0, 0x9d, 0xd8, 0xf8, 0x76, 0x23, 0x31, - 0xbe, 0x7d, 0x23, 0xd7, 0x30, 0x32, 0xc3, 0x39, 0xd9, 0xac, 0xfe, 0xbd, 0x62, 0xac, 0x3e, 0x83, - 0x71, 0x5e, 0x4c, 0xef, 0xdb, 0xdf, 0x79, 0xf2, 0xb4, 0x3e, 0xf3, 0xe5, 0xd3, 0xfa, 0xcc, 0x57, - 0x4f, 0xeb, 0x33, 0x3f, 0x9b, 0xd4, 0x95, 0x27, 0x93, 0xba, 0xf2, 0xe5, 0xa4, 0xae, 0x7c, 0x35, - 0xa9, 0x2b, 0x7f, 0x9d, 0xd4, 0x95, 0x5f, 0x7c, 0x5d, 0x9f, 0xf9, 0x64, 0x3d, 0xe7, 0x3f, 0xbf, - 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x15, 0x68, 0x85, 0x2c, 0x1e, 0x00, 0x00, + // 2034 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x54, 0x3c, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63, + 0x24, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x2b, 0x4a, 0x6e, 0xec, 0x40, 0x8a, + 0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63, + 0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14, + 0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, + 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 
0xed, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, + 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfb, 0x67, 0xef, 0x79, 0x9a, 0x6e, 0x37, 0xcf, 0x7a, 0x27, 0xd4, + 0xb5, 0x28, 0xa3, 0x5e, 0xb3, 0x4f, 0xad, 0x8e, 0xed, 0x36, 0x25, 0x81, 0x38, 0x7a, 0x93, 0x38, + 0x8e, 0xd7, 0xec, 0xdf, 0x3c, 0xa1, 0x8c, 0xdc, 0x6c, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, + 0x73, 0x5c, 0x9b, 0xd9, 0x68, 0xd9, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x2b, + 0x37, 0xba, 0x3a, 0x3b, 0xed, 0x9d, 0x68, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, + 0x27, 0xbd, 0x47, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x8a, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, + 0xda, 0xec, 0x67, 0x74, 0xad, 0xbc, 0x13, 0xf1, 0x98, 0xa4, 0x7d, 0xaa, 0x5b, 0xd4, 0x1d, 0x34, + 0x9d, 0xb3, 0x2e, 0x5f, 0xf0, 0x9a, 0x26, 0x65, 0x24, 0x4f, 0xaa, 0x59, 0x24, 0xe5, 0xf6, 0x2c, + 0xa6, 0x9b, 0x34, 0x23, 0xf0, 0xee, 0x45, 0x02, 0x5e, 0xfb, 0x94, 0x9a, 0x24, 0x23, 0xf7, 0x76, + 0x91, 0x5c, 0x8f, 0xe9, 0x46, 0x53, 0xb7, 0x98, 0xc7, 0xdc, 0xb4, 0x90, 0xfa, 0x2f, 0x05, 0xd0, + 0xb6, 0x6d, 0x31, 0xd7, 0x36, 0x0c, 0xea, 0x62, 0xda, 0xd7, 0x3d, 0xdd, 0xb6, 0xd0, 0xa7, 0x50, + 0xe3, 0xfb, 0xe9, 0x10, 0x46, 0x1a, 0xca, 0xba, 0xb2, 0x31, 0xb7, 0xf9, 0x96, 0x16, 0x79, 0x3a, + 0x84, 0xd7, 0x9c, 0xb3, 0x2e, 0x5f, 0xf0, 0x34, 0xce, 0xad, 0xf5, 0x6f, 0x6a, 0xfb, 0x27, 0x9f, + 0xd1, 0x36, 0xdb, 0xa3, 0x8c, 0xb4, 0xd0, 0x93, 0xe1, 0xda, 0xc4, 0x68, 0xb8, 0x06, 0xd1, 0x1a, + 0x0e, 0x51, 0xd1, 0x3e, 0x4c, 0x09, 0xf4, 0x8a, 0x40, 0xbf, 0x51, 0x88, 0x2e, 0x37, 0xad, 0x61, + 0xf2, 0xf9, 0xdd, 0xc7, 0x8c, 0x5a, 0xdc, 0xbc, 0xd6, 0x0b, 0x12, 0x7a, 0x6a, 0x87, 0x30, 0x82, + 0x05, 0x10, 0xba, 0x0e, 0x35, 0x57, 0x9a, 0xdf, 0x98, 0x5c, 0x57, 0x36, 0x26, 0x5b, 0x2f, 0x4a, + 0xae, 0x5a, 0xb0, 0x2d, 0x1c, 0x72, 0xa8, 0x4f, 0x14, 0xb8, 0x9a, 0xdd, 0xf7, 0xae, 0xee, 0x31, + 0xf4, 0xe3, 0xcc, 0xde, 0xb5, 0x72, 0x7b, 0xe7, 0xd2, 0x62, 0xe7, 0xa1, 0xe2, 0x60, 0x25, 0xb6, + 0xef, 0x03, 0xa8, 0xea, 0x8c, 0x9a, 0x5e, 0xa3, 0xb2, 0x3e, 0xb9, 0x31, 0xb7, 0xf9, 0xa6, 0x56, + 0x10, 0xc0, 0x5a, 0xd6, 0xba, 0xd6, 0xbc, 0xc4, 0xad, 0xde, 0xe7, 0x08, 0xd8, 0x07, 0x52, 0x7f, + 0x51, 0x01, 0xd8, 0xa1, 0x8e, 0x61, 0x0f, 0x4c, 0x6a, 0xb1, 0x4b, 0x38, 0xba, 0xfb, 0x30, 0xe5, + 0x39, 0xb4, 0x2d, 0x8f, 0xee, 0xb5, 0xc2, 0x1d, 0x44, 0x46, 0x1d, 0x3a, 0xb4, 0x1d, 0x1d, 0x1a, + 0xff, 0xc2, 0x02, 0x02, 0x7d, 0x0c, 0xd3, 0x1e, 0x23, 0xac, 0xe7, 0x89, 0x23, 0x9b, 0xdb, 0x7c, + 0xbd, 0x0c, 0x98, 0x10, 0x68, 0xd5, 0x25, 0xdc, 0xb4, 0xff, 0x8d, 0x25, 0x90, 0xfa, 0xd7, 0x49, + 0x58, 0x8c, 0x98, 0xb7, 0x6d, 0xab, 0xa3, 0x33, 0x1e, 0xd2, 0xb7, 0x61, 0x8a, 0x0d, 0x1c, 0x2a, + 0x7c, 0x32, 0xdb, 0x7a, 0x2d, 0x30, 0xe6, 0x68, 0xe0, 0xd0, 0x67, 0xc3, 0xb5, 0xe5, 0x1c, 0x11, + 0x4e, 0xc2, 0x42, 0x08, 0xed, 0x86, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x86, 0x6b, + 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x3b, 0x1d, + 0xc2, 0xe8, 0x91, 0x6e, 0xd2, 0xc6, 0xb4, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae, + 0x4a, 0x0b, 0xea, 0xbb, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xc8, 0x25, 0x96, + 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x19, 0x5b, 0xdf, 0x8a, 0xd4, 0x87, 0x76, 0x33, 0x68, 0x38, 0x47, + 0x03, 0x7a, 0x15, 0xa6, 0x5d, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x09, 0x8f, 0x85, 0xc7, 0x85, 0xc5, + 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0xcc, 0x98, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x41, + 0x32, 0xce, 0xec, 0xf9, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x53, 0xa0, 0x1e, 0x1d, 0xd3, 0x25, 0xe4, + 0xea, 0xbd, 0x64, 0xae, 0xbe, 0x52, 0x22, 0x38, 0x0b, 0x72, 0xf4, 
0xef, 0x15, 0x40, 0x11, 0x13, + 0xb6, 0x0d, 0xe3, 0x84, 0xb4, 0xcf, 0xd0, 0x3a, 0x4c, 0x59, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90, + 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x2d, 0xcb, 0xb2, + 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2e, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xce, 0xa0, 0xdc, 0xb5, + 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x01, 0xb8, 0x12, 0xf3, 0xc8, + 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0xdb, 0xb6, 0xf5, 0x48, 0xef, 0x46, 0x85, 0x05, 0x87, + 0x10, 0x38, 0x06, 0xb7, 0x72, 0x17, 0x96, 0x0b, 0xec, 0x44, 0x2f, 0xc2, 0xe4, 0x19, 0x1d, 0xf8, + 0xae, 0xc2, 0xfc, 0x27, 0x5a, 0x82, 0x6a, 0x9f, 0x18, 0x3d, 0xea, 0xe7, 0x24, 0xf6, 0x3f, 0x6e, + 0x55, 0xde, 0x53, 0xd4, 0x5f, 0x57, 0xe3, 0x91, 0xc2, 0xeb, 0x0d, 0xda, 0xe0, 0xd7, 0x83, 0x63, + 0xe8, 0x6d, 0xe2, 0x09, 0x8c, 0x6a, 0xeb, 0x05, 0xff, 0x6a, 0xf0, 0xd7, 0x70, 0x48, 0x45, 0x3f, + 0x81, 0x9a, 0x47, 0x0d, 0xda, 0x66, 0xb6, 0x2b, 0x4b, 0xdc, 0xdb, 0x25, 0x63, 0x8a, 0x9c, 0x50, + 0xe3, 0x50, 0x8a, 0xfa, 0xf0, 0xc1, 0x17, 0x0e, 0x21, 0xd1, 0xc7, 0x50, 0x63, 0xd4, 0x74, 0x0c, + 0xc2, 0xa8, 0xf4, 0x5e, 0x22, 0xae, 0x78, 0xed, 0xe0, 0x60, 0x07, 0x76, 0xe7, 0x48, 0xb2, 0x89, + 0xea, 0x19, 0xc6, 0x69, 0xb0, 0x8a, 0x43, 0x18, 0xf4, 0x43, 0xa8, 0x79, 0x8c, 0xdf, 0xea, 0xdd, + 0x81, 0xc8, 0xb6, 0xf3, 0xae, 0x95, 0x78, 0x1d, 0xf5, 0x45, 0x22, 0xe8, 0x60, 0x05, 0x87, 0x70, + 0x68, 0x0b, 0x16, 0x4c, 0xdd, 0xc2, 0x94, 0x74, 0x06, 0x87, 0xb4, 0x6d, 0x5b, 0x1d, 0x4f, 0xa4, + 0x69, 0xb5, 0xb5, 0x2c, 0x85, 0x16, 0xf6, 0x92, 0x64, 0x9c, 0xe6, 0x47, 0xbb, 0xb0, 0x14, 0x5c, + 0xbb, 0xf7, 0x74, 0x8f, 0xd9, 0xee, 0x60, 0x57, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xb5, 0xd5, 0x18, + 0x0d, 0xd7, 0x96, 0x70, 0x0e, 0x1d, 0xe7, 0x4a, 0xf1, 0xba, 0xe2, 0x90, 0x9e, 0x47, 0x3b, 0xa2, + 0x86, 0xd5, 0xa2, 0xba, 0x72, 0x20, 0x56, 0xb1, 0xa4, 0xa2, 0x87, 0x89, 0x30, 0xad, 0x8d, 0x17, + 0xa6, 0xf5, 0xe2, 0x10, 0x45, 0xc7, 0xb0, 0xec, 0xb8, 0x76, 0xd7, 0xa5, 0x9e, 0xb7, 0x43, 0x49, + 0xc7, 0xd0, 0x2d, 0x1a, 0x78, 0x66, 0x56, 0xec, 0xe8, 0xe5, 0xd1, 0x70, 0x6d, 0xf9, 0x20, 0x9f, + 0x05, 0x17, 0xc9, 0xaa, 0x7f, 0x9a, 0x82, 0x17, 0xd3, 0x77, 0x1c, 0xfa, 0x10, 0x90, 0x7d, 0xe2, + 0x51, 0xb7, 0x4f, 0x3b, 0x1f, 0xf8, 0x8d, 0x1b, 0xef, 0x6e, 0x14, 0xd1, 0xdd, 0x84, 0x79, 0xbb, + 0x9f, 0xe1, 0xc0, 0x39, 0x52, 0x7e, 0x7f, 0x24, 0x13, 0xa0, 0x22, 0x0c, 0x8d, 0xf5, 0x47, 0x99, + 0x24, 0xd8, 0x82, 0x05, 0x99, 0xfb, 0x01, 0x51, 0x04, 0x6b, 0xec, 0xdc, 0x8f, 0x93, 0x64, 0x9c, + 0xe6, 0x47, 0xb7, 0x61, 0xde, 0xe5, 0x71, 0x10, 0x02, 0xcc, 0x08, 0x80, 0x6f, 0x48, 0x80, 0x79, + 0x1c, 0x27, 0xe2, 0x24, 0x2f, 0xfa, 0x00, 0xae, 0x90, 0x3e, 0xd1, 0x0d, 0x72, 0x62, 0xd0, 0x10, + 0x60, 0x4a, 0x00, 0xbc, 0x24, 0x01, 0xae, 0x6c, 0xa5, 0x19, 0x70, 0x56, 0x06, 0xed, 0xc1, 0x62, + 0xcf, 0xca, 0x42, 0xf9, 0x41, 0xfc, 0xb2, 0x84, 0x5a, 0x3c, 0xce, 0xb2, 0xe0, 0x3c, 0x39, 0xf4, + 0x29, 0x40, 0x3b, 0xb8, 0xd5, 0xbd, 0xc6, 0xb4, 0x28, 0xc3, 0xd7, 0x4b, 0x24, 0x5b, 0xd8, 0x0a, + 0x44, 0x25, 0x30, 0x5c, 0xf2, 0x70, 0x0c, 0x13, 0xdd, 0x82, 0x7a, 0xdb, 0x36, 0x0c, 0x11, 0xf9, + 0xdb, 0x76, 0xcf, 0x62, 0x22, 0x78, 0xab, 0x2d, 0xc4, 0x2f, 0xfb, 0xed, 0x04, 0x05, 0xa7, 0x38, + 0xd5, 0x3f, 0x28, 0xf1, 0x6b, 0x26, 0x48, 0x67, 0x74, 0x2b, 0xd1, 0xfa, 0xbc, 0x9a, 0x6a, 0x7d, + 0xae, 0x66, 0x25, 0x62, 0x9d, 0x8f, 0x0e, 0xf3, 0x3c, 0xf8, 0x75, 0xab, 0xeb, 0x1f, 0xb8, 0x2c, + 0x89, 0x6f, 0x9d, 0x9b, 0x4a, 0x21, 0x77, 0xec, 0x62, 0xbc, 0x22, 0xce, 0x3c, 0x4e, 0xc4, 0x49, + 0x64, 0xf5, 0x0e, 0xd4, 0x93, 0x79, 0x98, 0xe8, 0xe9, 0x95, 0x0b, 0x7b, 0xfa, 0xaf, 0x15, 
0x58, + 0x2e, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x71, 0xec, 0x98, 0x2f, 0xec, 0x8d, 0xf9, 0xd4, 0xa4, + 0xf9, 0x53, 0x93, 0x76, 0xdf, 0x62, 0xfb, 0xee, 0x21, 0x73, 0x75, 0xab, 0xeb, 0x9f, 0xc3, 0x5e, + 0x02, 0x0b, 0xa7, 0xb0, 0xd1, 0x27, 0x50, 0x33, 0xc9, 0xe3, 0xc3, 0x9e, 0xdb, 0xcd, 0xf3, 0x57, + 0x39, 0x3d, 0xe2, 0xfe, 0xd8, 0x93, 0x28, 0x38, 0xc4, 0x53, 0xff, 0xa8, 0xc0, 0x7a, 0x62, 0x97, + 0xbc, 0x56, 0xd0, 0x47, 0x3d, 0xe3, 0x90, 0x46, 0x27, 0xfe, 0x26, 0xcc, 0x3a, 0xc4, 0x65, 0x7a, + 0x58, 0x2f, 0xaa, 0xad, 0xf9, 0xd1, 0x70, 0x6d, 0xf6, 0x20, 0x58, 0xc4, 0x11, 0x3d, 0xc7, 0x37, + 0x95, 0xe7, 0xe7, 0x1b, 0xf5, 0xdf, 0x0a, 0x54, 0x0f, 0xdb, 0xc4, 0xa0, 0x97, 0x30, 0xa9, 0xec, + 0x24, 0x26, 0x15, 0xb5, 0x30, 0x66, 0x85, 0x3d, 0x85, 0x43, 0xca, 0x6e, 0x6a, 0x48, 0xb9, 0x76, + 0x01, 0xce, 0xf9, 0xf3, 0xc9, 0xfb, 0x30, 0x1b, 0xaa, 0x4b, 0x14, 0x65, 0xe5, 0xa2, 0xa2, 0xac, + 0xfe, 0xaa, 0x02, 0x73, 0x31, 0x15, 0xe3, 0x49, 0x73, 0x77, 0xc7, 0xfa, 0x1a, 0x5e, 0xb8, 0x36, + 0xcb, 0x6c, 0x44, 0x0b, 0x7a, 0x18, 0xbf, 0x5d, 0x8c, 0x9a, 0x85, 0x6c, 0x6b, 0x73, 0x07, 0xea, + 0x8c, 0xb8, 0x5d, 0xca, 0x02, 0x9a, 0x70, 0xd8, 0x6c, 0x34, 0xab, 0x1c, 0x25, 0xa8, 0x38, 0xc5, + 0xbd, 0x72, 0x1b, 0xe6, 0x13, 0xca, 0xc6, 0xea, 0xf9, 0xbe, 0xe0, 0xce, 0x89, 0x52, 0xe1, 0x12, + 0xa2, 0xeb, 0xc3, 0x44, 0x74, 0x6d, 0x14, 0x3b, 0x33, 0x96, 0xa0, 0x45, 0x31, 0x86, 0x53, 0x31, + 0xf6, 0x46, 0x29, 0xb4, 0xf3, 0x23, 0xed, 0x1f, 0x15, 0x58, 0x8a, 0x71, 0x47, 0xa3, 0xf0, 0x77, + 0x12, 0xf7, 0xc1, 0x46, 0xea, 0x3e, 0x68, 0xe4, 0xc9, 0x3c, 0xb7, 0x59, 0x38, 0x7f, 0x3e, 0x9d, + 0xfc, 0x5f, 0x9c, 0x4f, 0x7f, 0xaf, 0xc0, 0x42, 0xcc, 0x77, 0x97, 0x30, 0xa0, 0xde, 0x4f, 0x0e, + 0xa8, 0xd7, 0xca, 0x04, 0x4d, 0xc1, 0x84, 0x7a, 0x0b, 0x16, 0x63, 0x4c, 0xfb, 0x6e, 0x47, 0xb7, + 0x88, 0xe1, 0xa1, 0x57, 0xa0, 0xea, 0x31, 0xe2, 0xb2, 0xe0, 0x12, 0x09, 0x64, 0x0f, 0xf9, 0x22, + 0xf6, 0x69, 0xea, 0x3f, 0x15, 0x68, 0xc6, 0x84, 0x0f, 0xa8, 0xeb, 0xe9, 0x1e, 0xa3, 0x16, 0x7b, + 0x60, 0x1b, 0x3d, 0x93, 0x6e, 0x1b, 0x44, 0x37, 0x31, 0xe5, 0x0b, 0xba, 0x6d, 0x1d, 0xd8, 0x86, + 0xde, 0x1e, 0x20, 0x02, 0x73, 0x9f, 0x9f, 0x52, 0x6b, 0x87, 0x1a, 0x94, 0xd1, 0x8e, 0x0c, 0xc5, + 0xef, 0x49, 0xf8, 0xb9, 0x87, 0x11, 0xe9, 0xd9, 0x70, 0x6d, 0xa3, 0x0c, 0xa2, 0x88, 0xd0, 0x38, + 0x26, 0xfa, 0x29, 0x00, 0xff, 0x14, 0xb5, 0xac, 0x23, 0x83, 0xf5, 0x4e, 0x90, 0xd1, 0x0f, 0x43, + 0xca, 0x58, 0x0a, 0x62, 0x88, 0xea, 0x6f, 0x6a, 0x89, 0xf3, 0xfe, 0xbf, 0x1f, 0x33, 0x7f, 0x06, + 0x4b, 0xfd, 0xc8, 0x3b, 0x01, 0x03, 0x6f, 0xcb, 0x27, 0xd3, 0x4f, 0x77, 0x21, 0x7c, 0x9e, 0x5f, + 0x5b, 0xdf, 0x94, 0x4a, 0x96, 0x1e, 0xe4, 0xc0, 0xe1, 0x5c, 0x25, 0xe8, 0xdb, 0x30, 0xc7, 0x47, + 0x1a, 0xbd, 0x4d, 0x3f, 0x22, 0x66, 0x90, 0x8b, 0x8b, 0x41, 0xbc, 0x1c, 0x46, 0x24, 0x1c, 0xe7, + 0x43, 0xa7, 0xb0, 0xe8, 0xd8, 0x9d, 0x3d, 0x62, 0x91, 0x2e, 0xe5, 0x8d, 0xa0, 0x7f, 0x94, 0x62, + 0xf6, 0x9c, 0x6d, 0xbd, 0x1b, 0xb4, 0xff, 0x07, 0x59, 0x96, 0x67, 0x7c, 0x88, 0xcb, 0x2e, 0x8b, + 0x20, 0xc8, 0x83, 0x44, 0x2e, 0xd4, 0x7b, 0xb2, 0x1f, 0x93, 0xa3, 0xb8, 0xff, 0xc8, 0xb6, 0x59, + 0x26, 0x29, 0x8f, 0x13, 0x92, 0xd1, 0x85, 0x99, 0x5c, 0xc7, 0x29, 0x0d, 0x85, 0xa3, 0x75, 0xed, + 0xbf, 0x1a, 0xad, 0x73, 0x66, 0xfd, 0xd9, 0x31, 0x67, 0xfd, 0x3f, 0x2b, 0x70, 0xcd, 0x29, 0x91, + 0x4b, 0x0d, 0x10, 0xbe, 0xb9, 0x57, 0xc6, 0x37, 0x65, 0x72, 0xb3, 0xb5, 0x31, 0x1a, 0xae, 0x5d, + 0x2b, 0xc3, 0x89, 0x4b, 0xd9, 0x87, 0x1e, 0x40, 0xcd, 0x96, 0x35, 0xb0, 0x31, 0x27, 0x6c, 0xbd, + 0x5e, 0xc6, 0xd6, 0xa0, 0x6e, 0xfa, 0x69, 0x19, 0x7c, 0xe1, 0x10, 0x4b, 0xfd, 0x6d, 0x15, 0xae, + 0x64, 0x6e, 0x70, 
0xf4, 0x83, 0x73, 0xe6, 0xfc, 0xab, 0xcf, 0x6d, 0xc6, 0xcf, 0x0c, 0xe8, 0x93, + 0x63, 0x0c, 0xe8, 0x5b, 0xb0, 0xd0, 0xee, 0xb9, 0x2e, 0xb5, 0x58, 0x6a, 0x3c, 0x0f, 0x83, 0x65, + 0x3b, 0x49, 0xc6, 0x69, 0xfe, 0xbc, 0x37, 0x86, 0xea, 0x98, 0x6f, 0x0c, 0x71, 0x2b, 0xe4, 0x9c, + 0xe8, 0xa7, 0x76, 0xd6, 0x0a, 0x39, 0x2e, 0xa6, 0xf9, 0x79, 0xd3, 0xea, 0xa3, 0x86, 0x08, 0x33, + 0xc9, 0xa6, 0xf5, 0x38, 0x41, 0xc5, 0x29, 0xee, 0x9c, 0x79, 0x7d, 0xb6, 0xec, 0xbc, 0x8e, 0x48, + 0xe2, 0x35, 0x01, 0x44, 0x1d, 0xbd, 0x51, 0x26, 0xce, 0xca, 0x3f, 0x27, 0xe4, 0x3e, 0xa4, 0xcc, + 0x8d, 0xff, 0x90, 0xa2, 0xfe, 0x45, 0x81, 0x97, 0x0a, 0x2b, 0x16, 0xda, 0x4a, 0xb4, 0x94, 0x37, + 0x52, 0x2d, 0xe5, 0xb7, 0x0a, 0x05, 0x63, 0x7d, 0xa5, 0x9b, 0xff, 0xd2, 0xf0, 0x7e, 0xb9, 0x97, + 0x86, 0x9c, 0x29, 0xf8, 0xe2, 0x27, 0x87, 0xd6, 0x77, 0x9f, 0x3c, 0x5d, 0x9d, 0xf8, 0xf2, 0xe9, + 0xea, 0xc4, 0x57, 0x4f, 0x57, 0x27, 0x7e, 0x3e, 0x5a, 0x55, 0x9e, 0x8c, 0x56, 0x95, 0x2f, 0x47, + 0xab, 0xca, 0x57, 0xa3, 0x55, 0xe5, 0x6f, 0xa3, 0x55, 0xe5, 0x97, 0x5f, 0xaf, 0x4e, 0x7c, 0xb2, + 0x5c, 0xf0, 0x6f, 0xf4, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x0a, 0xd6, 0x32, 0xc0, 0x1e, + 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -1737,6 +1770,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1790,6 +1849,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -2312,6 +2383,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -2360,6 +2441,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2676,6 +2761,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() 
string { if this == nil { return "nil" @@ -2707,6 +2802,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -5601,6 +5697,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6039,6 +6204,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 53df96ee4..245ec30f4 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -47,10 +47,10 @@ message ControllerRevision { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // 
Data is the serialized representation of the state. + // data is the serialized representation of the state. optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. optional int64 revision = 3; } @@ -128,17 +128,18 @@ message DeploymentRollback { // DeploymentSpec is the specification of the desired behavior of the Deployment. message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional optional int32 replicas = 1; - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -146,28 +147,28 @@ message DeploymentSpec { // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional optional int32 minReadySeconds = 5; - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional optional int32 revisionHistoryLimit = 6; - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional optional bool paused = 7; // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -178,15 +179,15 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). 
// +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; @@ -198,18 +199,18 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 6; - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -276,7 +277,7 @@ message RollingUpdateStatefulSetStrategy { // This is helpful in being able to do a canary based deployment. The default value is 0. optional int32 partition = 1; - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -293,32 +294,32 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional map selector = 2; - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -332,8 +333,9 @@ message ScaleStatus { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { @@ -379,16 +381,31 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. optional string whenDeleted = 1; - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -415,7 +432,9 @@ message StatefulSetSpec { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. 
@@ -457,10 +476,9 @@ message StatefulSetSpec { // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -469,6 +487,14 @@ message StatefulSetSpec { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -506,14 +532,13 @@ message StatefulSetStatus { // +optional optional int32 collisionCount = 9; - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index cdc81c956..59ed9c2ac 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -31,21 +31,21 @@ const ( // ScaleSpec describes the attributes of a scale subresource type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. 
This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -68,11 +68,11 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -88,8 +88,9 @@ type Scale struct { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { @@ -158,7 +159,7 @@ type RollingUpdateStatefulSetStrategy struct { // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. // This is helpful in being able to do a canary based deployment. The default value is 0. Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -190,12 +191,12 @@ const ( // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. type StatefulSetPersistentVolumeClaimRetentionPolicy struct { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. 
The // `Delete` policy causes the associated PVCs for any excess pods above @@ -203,6 +204,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -222,7 +238,9 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -264,10 +282,9 @@ type StatefulSetSpec struct { // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -276,6 +293,14 @@ type StatefulSetSpec struct { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. 
@@ -313,14 +338,13 @@ type StatefulSetStatus struct { // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -385,17 +409,18 @@ type Deployment struct { // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -403,28 +428,28 @@ type DeploymentSpec struct { // +patchStrategy=retainKeys Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. 
// +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -523,15 +548,15 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` @@ -543,18 +568,18 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -636,10 +661,10 @@ type ControllerRevision struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data is the serialized representation of the state. + // data is the serialized representation of the state. Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. 
Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index f57b7b2ef..a62e9869d 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", + "data": "data is the serialized representation of the state.", + "revision": "revision indicates the revision of the state represented by Data.", } func (ControllerRevision) SwaggerDoc() map[string]string { @@ -96,15 +96,15 @@ func (DeploymentRollback) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", + "replicas": "replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "selector is the label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. 
This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "paused indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -113,14 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", + "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -159,7 +159,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { var map_RollingUpdateStatefulSetStrategy = map[string]string{ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -169,8 +169,8 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status defines current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -179,7 +179,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the number of observed instances of the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -188,9 +188,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "replicas": "replias is the actual number of observed instances of the scaled object.", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "targetSelector": "targetSelector is the label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -198,7 +198,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", } @@ -228,10 +228,19 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", - "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", - "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "whenDeleted": "whenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "whenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", } func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { @@ -242,14 +251,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. 
Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. 
Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -266,8 +276,8 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "conditions": "conditions represent the latest available observations of a statefulset's current state.", + "availableReplicas": "availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 185f868cd..dd73f1a5a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -505,6 +505,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. +func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -553,6 +569,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index df26908fc..6dfb4d5d2 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -833,12 +833,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} + return fileDescriptor_42fe616264472f7e, []int{29} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -866,7 +894,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} + return fileDescriptor_42fe616264472f7e, []int{30} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +922,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{30} + return fileDescriptor_42fe616264472f7e, []int{31} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -922,7 +950,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{31} + return fileDescriptor_42fe616264472f7e, []int{32} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -977,6 +1005,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta2.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") @@ -988,151 +1017,154 @@ func init() { } var fileDescriptor_42fe616264472f7e = []byte{ - // 2295 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcf, 0x6f, 0x1b, 0xc7, - 0xf5, 0xd7, 0xf2, 0x87, 0x44, 0x8e, 0x2c, 0xc9, 0x1e, 0xe9, 0x2b, 0x31, 0xf2, 0xb7, 0xa4, 0xb1, - 0x31, 0x1c, 0x25, 0xb6, 0x49, 0x5b, 0xf9, 0x81, 0xc4, 0x6e, 0x93, 0x8a, 0x52, 0x6a, 0x3b, 0x90, - 0x64, 0x66, 0x64, 0x39, 0x68, 0xd0, 0x1f, 0x1e, 0x91, 0x63, 0x6a, 0xa3, 0xe5, 0xee, 0x62, 0x77, - 0x96, 0x31, 0xd1, 0x4b, 0xaf, 0x05, 0x0a, 0xb4, 0xbd, 0xf6, 0x9f, 0xe8, 0xad, 0x28, 0x1a, 0xf4, - 0x52, 0x04, 0x81, 0x8f, 0x41, 0x2f, 0x49, 0x2f, 0x44, 0xcd, 0x9c, 0x8a, 0xa2, 0xb7, 0xf6, 0x62, - 0xa0, 0x40, 0x31, 0xb3, 0xb3, 0xbf, 0x77, 0xcd, 0xa5, 0x62, 0x2b, 0x4d, 0x90, 0x1b, 0x77, 0xde, - 0x7b, 0x9f, 0x79, 0x33, 0xf3, 0xde, 0xbc, 0xcf, 0xcc, 0x10, 0x7c, 0xff, 0xe8, 0x75, 0xab, 0xae, - 0xe8, 0x8d, 0x23, 0xfb, 0x80, 0x98, 0x1a, 0xa1, 0xc4, 0x6a, 0xf4, 0x89, 0xd6, 0xd1, 0xcd, 0x86, - 0x10, 0x60, 0x43, 0x69, 0x60, 0xc3, 0xb0, 0x1a, 0xfd, 0xab, 0x07, 0x84, 0xe2, 0xf5, 0x46, 0x97, - 0x68, 0xc4, 0xc4, 0x94, 0x74, 0xea, 0x86, 0xa9, 0x53, 0x1d, 0xae, 0x38, 0x8a, 0x75, 0x6c, 0x28, - 0x75, 0xa6, 0x58, 0x17, 0x8a, 0xab, 0x97, 0xbb, 0x0a, 0x3d, 0xb4, 0x0f, 0xea, 0x6d, 0xbd, 0xd7, - 0xe8, 0xea, 0x5d, 0xbd, 0xc1, 0xf5, 0x0f, 0xec, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x0e, 0xce, - 0xaa, 0x1c, 0xe8, 0xb0, 0xad, 0x9b, 0xa4, 0xd1, 0xbf, 0x1a, 0xed, 0x6b, 0xf5, 0x15, 0x5f, 0xa7, - 0x87, 0xdb, 0x87, 0x8a, 0x46, 0xcc, 0x41, 0xc3, 0x38, 0xea, 0xb2, 0x06, 0xab, 0xd1, 0x23, 0x14, - 0x27, 0x59, 0x35, 0xd2, 0xac, 0x4c, 0x5b, 0xa3, 0x4a, 0x8f, 0xc4, 0x0c, 0x5e, 0x1b, 0x67, 0x60, - 0xb5, 0x0f, 0x49, 0x0f, 0xc7, 0xec, 0x5e, 0x4e, 0xb3, 0xb3, 0xa9, 0xa2, 0x36, 0x14, 0x8d, 0x5a, - 0xd4, 0x8c, 0x1a, 0xc9, 0xff, 0x96, 0x00, 0xdc, 0xd4, 0x35, 0x6a, 0xea, 0xaa, 0x4a, 0x4c, 0x44, - 0xfa, 0x8a, 0xa5, 0xe8, 0x1a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x1d, 0x4c, 0x71, 0x45, 0x3a, 0x27, - 0xad, 0xcd, 0xae, 0x5f, 0xa9, 0xfb, 0x33, 0xed, 0xc1, 0xd7, 0x8d, 0xa3, 0x2e, 0x6b, 0xb0, 0xea, - 0x4c, 0xbb, 0xde, 0xbf, 0x5a, 0xbf, 0x7d, 0xf0, 0x01, 0x69, 0xd3, 0x1d, 0x42, 0x71, 0x13, 0x3e, - 0x1c, 0xd6, 0xa6, 0x46, 0xc3, 0x1a, 0xf0, 0xdb, 0x90, 0x87, 0x0a, 0x6f, 0x83, 0x02, 0x47, 0xcf, - 0x71, 0xf4, 0xcb, 0xa9, 0xe8, 0x62, 0xd0, 0x75, 0x84, 0x3f, 0x7c, 0xfb, 0x01, 0x25, 0x1a, 0x73, - 0xaf, 0x79, 0x4a, 0x40, 0x17, 0xb6, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0x4b, 0xa0, 0x64, 0x0a, 0xf7, - 0x2b, 0xf9, 0x73, 0xd2, 0x5a, 0xbe, 0x79, 0x5a, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, - 0x28, 0x81, 0xe5, 0xf8, 0xb8, 0xb7, 0x15, 0x8b, 0xc2, 0x1f, 0xc5, 0xc6, 0x5e, 0xcf, 0x36, 0x76, - 0x66, 0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x02, 0x45, 0x85, 0x92, 0x9e, 0x55, - 0xc9, 0x9d, 0xcb, 0xaf, 
0xcd, 0xae, 0x5f, 0xac, 0xa7, 0x04, 0x70, 0x3d, 0xee, 0x5d, 0x73, 0x4e, - 0xe0, 0x16, 0x6f, 0x31, 0x04, 0xe4, 0x00, 0xc9, 0xbf, 0xc8, 0x81, 0xf2, 0x16, 0x26, 0x3d, 0x5d, - 0xdb, 0x23, 0xf4, 0x04, 0x56, 0xee, 0x26, 0x28, 0x58, 0x06, 0x69, 0x8b, 0x95, 0xbb, 0x90, 0x3a, - 0x00, 0xcf, 0xa7, 0x3d, 0x83, 0xb4, 0xfd, 0x25, 0x63, 0x5f, 0x88, 0x23, 0xc0, 0x16, 0x98, 0xb6, - 0x28, 0xa6, 0xb6, 0xc5, 0x17, 0x6c, 0x76, 0x7d, 0x2d, 0x03, 0x16, 0xd7, 0x6f, 0xce, 0x0b, 0xb4, - 0x69, 0xe7, 0x1b, 0x09, 0x1c, 0xf9, 0xef, 0x39, 0x00, 0x3d, 0xdd, 0x4d, 0x5d, 0xeb, 0x28, 0x94, - 0x85, 0xf3, 0x35, 0x50, 0xa0, 0x03, 0x83, 0xf0, 0x09, 0x29, 0x37, 0x2f, 0xb8, 0xae, 0xdc, 0x19, - 0x18, 0xe4, 0xf1, 0xb0, 0xb6, 0x1c, 0xb7, 0x60, 0x12, 0xc4, 0x6d, 0xe0, 0xb6, 0xe7, 0x64, 0x8e, - 0x5b, 0xbf, 0x12, 0xee, 0xfa, 0xf1, 0xb0, 0x96, 0xb0, 0x77, 0xd4, 0x3d, 0xa4, 0xb0, 0x83, 0xb0, - 0x0f, 0xa0, 0x8a, 0x2d, 0x7a, 0xc7, 0xc4, 0x9a, 0xe5, 0xf4, 0xa4, 0xf4, 0x88, 0x18, 0xfe, 0x4b, - 0xd9, 0x16, 0x8a, 0x59, 0x34, 0x57, 0x85, 0x17, 0x70, 0x3b, 0x86, 0x86, 0x12, 0x7a, 0x80, 0x17, - 0xc0, 0xb4, 0x49, 0xb0, 0xa5, 0x6b, 0x95, 0x02, 0x1f, 0x85, 0x37, 0x81, 0x88, 0xb7, 0x22, 0x21, - 0x85, 0x2f, 0x82, 0x99, 0x1e, 0xb1, 0x2c, 0xdc, 0x25, 0x95, 0x22, 0x57, 0x5c, 0x10, 0x8a, 0x33, - 0x3b, 0x4e, 0x33, 0x72, 0xe5, 0xf2, 0xef, 0x25, 0x30, 0xe7, 0xcd, 0xdc, 0x09, 0x64, 0xce, 0x8d, - 0x70, 0xe6, 0xc8, 0xe3, 0x83, 0x25, 0x25, 0x61, 0x3e, 0xce, 0x07, 0x1c, 0x67, 0xe1, 0x08, 0x7f, - 0x0c, 0x4a, 0x16, 0x51, 0x49, 0x9b, 0xea, 0xa6, 0x70, 0xfc, 0xe5, 0x8c, 0x8e, 0xe3, 0x03, 0xa2, - 0xee, 0x09, 0xd3, 0xe6, 0x29, 0xe6, 0xb9, 0xfb, 0x85, 0x3c, 0x48, 0xf8, 0x2e, 0x28, 0x51, 0xd2, - 0x33, 0x54, 0x4c, 0x89, 0xc8, 0x9a, 0xe7, 0x83, 0xce, 0xb3, 0x98, 0x61, 0x60, 0x2d, 0xbd, 0x73, - 0x47, 0xa8, 0xf1, 0x94, 0xf1, 0x26, 0xc3, 0x6d, 0x45, 0x1e, 0x0c, 0x34, 0xc0, 0xbc, 0x6d, 0x74, - 0x98, 0x26, 0x65, 0xdb, 0x79, 0x77, 0x20, 0x62, 0xe8, 0xca, 0xf8, 0x59, 0xd9, 0x0f, 0xd9, 0x35, - 0x97, 0x45, 0x2f, 0xf3, 0xe1, 0x76, 0x14, 0xc1, 0x87, 0x1b, 0x60, 0xa1, 0xa7, 0x68, 0x88, 0xe0, - 0xce, 0x60, 0x8f, 0xb4, 0x75, 0xad, 0x63, 0xf1, 0x50, 0x2a, 0x36, 0x57, 0x04, 0xc0, 0xc2, 0x4e, - 0x58, 0x8c, 0xa2, 0xfa, 0x70, 0x1b, 0x2c, 0xb9, 0x1b, 0xf0, 0x4d, 0xc5, 0xa2, 0xba, 0x39, 0xd8, - 0x56, 0x7a, 0x0a, 0xad, 0x4c, 0x73, 0x9c, 0xca, 0x68, 0x58, 0x5b, 0x42, 0x09, 0x72, 0x94, 0x68, - 0x25, 0xff, 0x66, 0x1a, 0x2c, 0x44, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0xb7, 0x6d, 0xd3, 0x24, 0x1a, - 0xdd, 0xb5, 0x7b, 0x07, 0xc4, 0xdc, 0x6b, 0x1f, 0x92, 0x8e, 0xad, 0x92, 0x0e, 0x5f, 0xd6, 0x62, - 0xb3, 0x2a, 0x7c, 0x5d, 0xde, 0x4c, 0xd4, 0x42, 0x29, 0xd6, 0xf0, 0x1d, 0x00, 0x35, 0xde, 0xb4, - 0xa3, 0x58, 0x96, 0x87, 0x99, 0xe3, 0x98, 0x5e, 0x2a, 0xee, 0xc6, 0x34, 0x50, 0x82, 0x15, 0xf3, - 0xb1, 0x43, 0x2c, 0xc5, 0x24, 0x9d, 0xa8, 0x8f, 0xf9, 0xb0, 0x8f, 0x5b, 0x89, 0x5a, 0x28, 0xc5, - 0x1a, 0xbe, 0x0a, 0x66, 0x9d, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x14, 0x60, 0xb3, 0xbb, 0xbe, - 0x08, 0x05, 0xf5, 0xd8, 0xd0, 0xf4, 0x03, 0x8b, 0x98, 0x7d, 0xd2, 0xb9, 0xe1, 0x90, 0x03, 0x56, - 0x41, 0x8b, 0xbc, 0x82, 0x7a, 0x43, 0xbb, 0x1d, 0xd3, 0x40, 0x09, 0x56, 0x6c, 0x68, 0x4e, 0xd4, - 0xc4, 0x86, 0x36, 0x1d, 0x1e, 0xda, 0x7e, 0xa2, 0x16, 0x4a, 0xb1, 0x66, 0xb1, 0xe7, 0xb8, 0xbc, - 0xd1, 0xc7, 0x8a, 0x8a, 0x0f, 0x54, 0x52, 0x99, 0x09, 0xc7, 0xde, 0x6e, 0x58, 0x8c, 0xa2, 0xfa, - 0xf0, 0x06, 0x38, 0xe3, 0x34, 0xed, 0x6b, 0xd8, 0x03, 0x29, 0x71, 0x90, 0xe7, 0x04, 0xc8, 0x99, - 0xdd, 0xa8, 0x02, 0x8a, 0xdb, 0xc0, 0x6b, 0x60, 0xbe, 0xad, 0xab, 0x2a, 0x8f, 0xc7, 0x4d, 0xdd, - 0xd6, 0x68, 0xa5, 0xcc, 0x51, 0x20, 0xcb, 0xa1, 
0xcd, 0x90, 0x04, 0x45, 0x34, 0xe1, 0x4f, 0x01, - 0x68, 0xbb, 0x85, 0xc1, 0xaa, 0x80, 0x31, 0x0c, 0x20, 0x5e, 0x96, 0xfc, 0xca, 0xec, 0x35, 0x59, - 0x28, 0x00, 0x29, 0x7f, 0x2c, 0x81, 0x95, 0x94, 0x44, 0x87, 0x6f, 0x85, 0x8a, 0xe0, 0xc5, 0x48, - 0x11, 0x3c, 0x9b, 0x62, 0x16, 0xa8, 0x84, 0x87, 0x60, 0x8e, 0x11, 0x12, 0x45, 0xeb, 0x3a, 0x2a, - 0x62, 0x2f, 0x6b, 0xa4, 0x0e, 0x00, 0x05, 0xb5, 0xfd, 0x5d, 0xf9, 0xcc, 0x68, 0x58, 0x9b, 0x0b, - 0xc9, 0x50, 0x18, 0x58, 0xfe, 0x65, 0x0e, 0x80, 0x2d, 0x62, 0xa8, 0xfa, 0xa0, 0x47, 0xb4, 0x93, - 0xe0, 0x34, 0xb7, 0x42, 0x9c, 0xe6, 0x85, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, - 0x48, 0xcd, 0x8b, 0x59, 0xc0, 0x9e, 0xcc, 0x6a, 0x3e, 0xcb, 0x83, 0x45, 0x5f, 0xd9, 0xa7, 0x35, - 0xd7, 0x43, 0x2b, 0xfa, 0x42, 0x64, 0x45, 0x57, 0x12, 0x4c, 0x9e, 0x19, 0xaf, 0xf9, 0x00, 0xcc, - 0x33, 0xd6, 0xe1, 0xac, 0x1f, 0xe7, 0x34, 0xd3, 0x13, 0x73, 0x1a, 0xaf, 0x12, 0x6d, 0x87, 0x90, - 0x50, 0x04, 0x39, 0x85, 0x43, 0xcd, 0x7c, 0x1d, 0x39, 0xd4, 0x1f, 0x24, 0x30, 0xef, 0x2f, 0xd3, - 0x09, 0x90, 0xa8, 0x9b, 0x61, 0x12, 0xf5, 0x7c, 0x86, 0xe0, 0x4c, 0x61, 0x51, 0x9f, 0x15, 0x82, - 0xae, 0x73, 0x1a, 0xb5, 0xc6, 0x8e, 0x60, 0x86, 0xaa, 0xb4, 0xb1, 0x25, 0xea, 0xed, 0x29, 0xe7, - 0xf8, 0xe5, 0xb4, 0x21, 0x4f, 0x1a, 0x22, 0x5c, 0xb9, 0x67, 0x4b, 0xb8, 0xf2, 0x4f, 0x87, 0x70, - 0xfd, 0x10, 0x94, 0x2c, 0x97, 0x6a, 0x15, 0x38, 0xe4, 0xc5, 0x4c, 0x89, 0x2d, 0x58, 0x96, 0x07, - 0xed, 0xf1, 0x2b, 0x0f, 0x2e, 0x89, 0x59, 0x15, 0xbf, 0x4a, 0x66, 0xc5, 0x02, 0xdd, 0xc0, 0xb6, - 0x45, 0x3a, 0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x78, 0x2b, 0x12, 0x52, 0xb8, 0x0f, 0x56, 0x0c, - 0x53, 0xef, 0x9a, 0xc4, 0xb2, 0xb6, 0x08, 0xee, 0xa8, 0x8a, 0x46, 0xdc, 0x01, 0x38, 0x35, 0xf1, - 0xec, 0x68, 0x58, 0x5b, 0x69, 0x25, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xcf, 0x05, 0x70, 0x3a, 0xba, - 0x37, 0xa6, 0xd0, 0x14, 0xe9, 0x58, 0x34, 0xe5, 0x52, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, - 0x88, 0xc5, 0xea, 0x06, 0x58, 0x10, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x7e, 0x58, - 0x8c, 0xa2, 0xfa, 0xf0, 0x3a, 0x98, 0x33, 0x39, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x7f, 0x02, - 0x60, 0x0e, 0x05, 0x85, 0x28, 0xac, 0xcb, 0x98, 0x8b, 0x4f, 0x48, 0x5c, 0x80, 0x42, 0x98, 0xb9, - 0x6c, 0x44, 0x15, 0x50, 0xdc, 0x06, 0xee, 0x80, 0x45, 0x5b, 0x8b, 0x43, 0x39, 0xb1, 0x76, 0x56, - 0x40, 0x2d, 0xee, 0xc7, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0xd3, 0x7c, 0x3f, 0xb9, - 0x94, 0x21, 0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, - 0xe3, 0x79, 0x38, 0xf6, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x92, 0x91, - 0xff, 0xf8, 0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x64, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, - 0xa9, 0xa7, 0x40, 0x80, 0x02, 0x60, 0x4f, 0x26, 0x40, 0xff, 0xc8, 0x81, 0x45, 0x5f, 0x39, 0x33, - 0x01, 0x4a, 0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0x7f, 0x89, 0x94, 0xf8, - 0x5e, 0xa5, 0x90, 0x92, 0xdf, 0xe5, 0x82, 0xae, 0x4f, 0x48, 0x4a, 0x9e, 0xc2, 0x0d, 0xc7, 0xd7, - 0x8e, 0xd7, 0xc8, 0x9f, 0xe4, 0xc1, 0xe9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb6, 0x40, 0xb6, - 0xc0, 0xd2, 0x7d, 0x5b, 0x55, 0x07, 0x7c, 0x0c, 0x81, 0x2a, 0xe9, 0x94, 0xd6, 0xff, 0x17, 0x96, - 0x4b, 0x3f, 0x48, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x8c, - 0x7a, 0x99, 0x4c, 0x39, 0xf2, 0xc7, 0xa2, 0x1c, 0x93, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xec, 0xd1, - 0x7f, 0x24, 0x81, 0xe5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x7c, 0x0f, 0x3f, 0x08, 0x5e, 0x7c, 0x8c, - 0x2b, 0x22, 0x36, 0x55, 0xd4, 0xba, 0xf3, 0x64, 0x54, 0xbf, 0xa5, 0xd1, 
0xdb, 0xe6, 0x1e, 0x35, - 0x15, 0xad, 0xeb, 0x54, 0xde, 0x9d, 0x10, 0x16, 0x8a, 0x60, 0xc3, 0xf7, 0x41, 0xa9, 0x87, 0x1f, - 0xec, 0xd9, 0x66, 0x37, 0xa9, 0x42, 0x66, 0xeb, 0x87, 0x27, 0xc0, 0x8e, 0x40, 0x41, 0x1e, 0x9e, - 0xfc, 0x85, 0x04, 0x56, 0x52, 0xaa, 0xea, 0x37, 0x68, 0x94, 0x7f, 0x92, 0xc0, 0xb9, 0xd0, 0x28, - 0x59, 0x5a, 0x92, 0xfb, 0xb6, 0xca, 0x33, 0x54, 0x30, 0x99, 0x8b, 0xa0, 0x6c, 0x60, 0x93, 0x2a, - 0x1e, 0x0f, 0x2e, 0x36, 0xe7, 0x46, 0xc3, 0x5a, 0xb9, 0xe5, 0x36, 0x22, 0x5f, 0x9e, 0x30, 0x37, - 0xb9, 0x67, 0x37, 0x37, 0xf2, 0x7f, 0x24, 0x50, 0xdc, 0x6b, 0x63, 0x95, 0x9c, 0x00, 0x71, 0xd9, - 0x0a, 0x11, 0x97, 0xf4, 0x47, 0x01, 0xee, 0x4f, 0x2a, 0x67, 0xd9, 0x8e, 0x70, 0x96, 0xf3, 0x63, - 0x70, 0x9e, 0x4c, 0x57, 0xde, 0x00, 0x65, 0xaf, 0xbb, 0xc9, 0xf6, 0x52, 0xf9, 0xb7, 0x39, 0x30, - 0x1b, 0xe8, 0x62, 0xc2, 0x9d, 0xf8, 0x5e, 0xa8, 0xfc, 0xb0, 0x3d, 0x66, 0x3d, 0xcb, 0x40, 0xea, - 0x6e, 0xa9, 0x79, 0x5b, 0xa3, 0x66, 0xf0, 0xac, 0x1a, 0xaf, 0x40, 0x6f, 0x82, 0x79, 0x8a, 0xcd, - 0x2e, 0xa1, 0xae, 0x8c, 0x4f, 0x58, 0xd9, 0xbf, 0xbb, 0xb9, 0x13, 0x92, 0xa2, 0x88, 0xf6, 0xea, - 0x75, 0x30, 0x17, 0xea, 0x0c, 0x9e, 0x06, 0xf9, 0x23, 0x32, 0x70, 0x18, 0x1c, 0x62, 0x3f, 0xe1, - 0x12, 0x28, 0xf6, 0xb1, 0x6a, 0x3b, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xb5, 0xdc, 0xeb, 0x92, 0xfc, - 0x2b, 0x36, 0x39, 0x7e, 0x2a, 0x9c, 0x40, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, - 0x82, 0xa6, 0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1c, 0x69, 0xff, 0xcc, 0x81, - 0xa5, 0x80, 0xb6, 0xcf, 0x8c, 0xbf, 0x1b, 0x62, 0xc6, 0x6b, 0x11, 0x66, 0x5c, 0x49, 0xb2, 0xf9, - 0x96, 0x1a, 0x8f, 0xa7, 0xc6, 0x7f, 0x94, 0xc0, 0x42, 0x60, 0xee, 0x4e, 0x80, 0x1b, 0xdf, 0x0a, - 0x73, 0xe3, 0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0xff, 0x4b, 0x02, 0x8d, 0x80, 0x56, 0x8b, 0x98, - 0x96, 0x62, 0x51, 0xa2, 0xd1, 0xbb, 0xba, 0x6a, 0xf7, 0xc8, 0xa6, 0x8a, 0x95, 0x1e, 0x22, 0xac, - 0x41, 0xd1, 0xb5, 0x96, 0xae, 0x2a, 0xed, 0x01, 0xc4, 0x60, 0xf6, 0xc3, 0x43, 0xa2, 0x6d, 0x11, - 0x95, 0x50, 0xf1, 0x8a, 0x56, 0x6e, 0xbe, 0xe5, 0x3e, 0x2a, 0xbd, 0xe7, 0x8b, 0x1e, 0x0f, 0x6b, - 0x6b, 0x59, 0x10, 0x79, 0x94, 0x05, 0x31, 0xe1, 0x4f, 0x00, 0x60, 0x9f, 0x7c, 0x3f, 0xea, 0x88, - 0x80, 0x7b, 0xd3, 0xcd, 0xca, 0xf7, 0x3c, 0xc9, 0x44, 0x1d, 0x04, 0x10, 0xe5, 0xbf, 0xce, 0x84, - 0xd6, 0xec, 0x1b, 0x7f, 0x53, 0xf9, 0x33, 0xb0, 0xd4, 0xf7, 0x67, 0xc7, 0x55, 0x60, 0x1c, 0x3a, - 0x1f, 0x3d, 0x8d, 0x7b, 0xf0, 0x49, 0xf3, 0xea, 0x33, 0xf7, 0xbb, 0x09, 0x70, 0x28, 0xb1, 0x13, - 0xf8, 0x2a, 0x98, 0x65, 0xdc, 0x57, 0x69, 0x93, 0x5d, 0xdc, 0x73, 0xf3, 0xc9, 0x7b, 0x84, 0xdc, - 0xf3, 0x45, 0x28, 0xa8, 0x07, 0x0f, 0xc1, 0xa2, 0xa1, 0x77, 0x76, 0xb0, 0x86, 0xbb, 0x84, 0x91, - 0x39, 0x67, 0x29, 0xf9, 0xf5, 0x65, 0xb9, 0xf9, 0x9a, 0x7b, 0x35, 0xd5, 0x8a, 0xab, 0xb0, 0x63, - 0x7e, 0x42, 0x33, 0x0f, 0x82, 0x24, 0x48, 0x68, 0xc6, 0x1e, 0xce, 0x9d, 0x87, 0x83, 0xf5, 0x2c, - 0x89, 0x75, 0xcc, 0xa7, 0xf3, 0xb4, 0xdb, 0xd9, 0xd2, 0xb1, 0x6e, 0x67, 0x13, 0x8e, 0xa9, 0xe5, - 0x09, 0x8f, 0xa9, 0x9f, 0x48, 0xe0, 0xbc, 0x91, 0x21, 0x97, 0x2a, 0x80, 0xcf, 0xcd, 0xcd, 0x2c, - 0x73, 0x93, 0x25, 0x37, 0x9b, 0x6b, 0xa3, 0x61, 0xed, 0x7c, 0x16, 0x4d, 0x94, 0xc9, 0x3f, 0xf9, - 0xa3, 0x22, 0x38, 0x13, 0xab, 0x96, 0x5f, 0xe1, 0x5d, 0x71, 0xec, 0xe0, 0x9a, 0x9f, 0xe0, 0xe0, - 0xba, 0x01, 0x16, 0xc4, 0xdf, 0x0f, 0x22, 0xe7, 0x5e, 0x6f, 0x61, 0x37, 0xc3, 0x62, 0x14, 0xd5, - 0x4f, 0xba, 0xab, 0x2e, 0x4e, 0x78, 0x57, 0x1d, 0xf4, 0x42, 0xfc, 0x9d, 0xce, 0x49, 0xc3, 0xb8, - 0x17, 0xe2, 0x5f, 0x75, 0x51, 0x7d, 0x46, 0x12, 0x1d, 0x54, 0x0f, 0x61, 0x26, 0x4c, 0x12, 0xf7, - 
0x43, 0x52, 0x14, 0xd1, 0xfe, 0x52, 0x4f, 0xec, 0x38, 0xe1, 0x89, 0xfd, 0x72, 0x96, 0xf8, 0xcd, - 0x7e, 0x2d, 0x9d, 0x78, 0xc1, 0x30, 0x3b, 0xf9, 0x05, 0x83, 0xfc, 0x17, 0x09, 0x3c, 0x97, 0xba, - 0xbb, 0xc0, 0x8d, 0x10, 0x85, 0xbb, 0x1c, 0xa1, 0x70, 0xdf, 0x49, 0x35, 0x0c, 0xf0, 0x38, 0x33, - 0xf9, 0xc6, 0xfa, 0x8d, 0x6c, 0x37, 0xd6, 0x09, 0xa7, 0xce, 0xf1, 0x57, 0xd7, 0xcd, 0xef, 0x3d, - 0x7c, 0x54, 0x9d, 0xfa, 0xf4, 0x51, 0x75, 0xea, 0xf3, 0x47, 0xd5, 0xa9, 0x9f, 0x8f, 0xaa, 0xd2, - 0xc3, 0x51, 0x55, 0xfa, 0x74, 0x54, 0x95, 0x3e, 0x1f, 0x55, 0xa5, 0xbf, 0x8d, 0xaa, 0xd2, 0xaf, - 0xbf, 0xa8, 0x4e, 0xbd, 0xbf, 0x92, 0xf2, 0x07, 0xdf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5c, - 0xbc, 0x64, 0x9c, 0x13, 0x2c, 0x00, 0x00, + // 2345 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0x91, 0x5b, 0xd2, 0x58, 0x1b, + 0x8e, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0xa4, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1, + 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0x5a, 0xee, 0x2e, 0x76, 0x87, 0x8c, + 0x89, 0x5e, 0x7a, 0x2d, 0x50, 0xa0, 0xed, 0xb5, 0xff, 0x44, 0xd1, 0x4b, 0x51, 0x34, 0xe8, 0xa5, + 0x08, 0x02, 0x1f, 0x83, 0x5e, 0x92, 0x13, 0x51, 0x33, 0xa7, 0xa2, 0xe8, 0xad, 0xbd, 0x18, 0x28, + 0x50, 0xcc, 0xec, 0xec, 0xf7, 0xae, 0xb9, 0x54, 0x6c, 0xe5, 0x03, 0xb9, 0x71, 0xe7, 0xbd, 0xf7, + 0x9b, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x0c, 0xc1, 0x0f, 0x0f, 0x5f, 0xb3, 0x6a, 0x8a, 0x5e, + 0x3f, 0xec, 0xef, 0x13, 0x53, 0x23, 0x94, 0x58, 0xf5, 0x01, 0xd1, 0x3a, 0xba, 0x59, 0x17, 0x02, + 0x6c, 0x28, 0x75, 0x6c, 0x18, 0x56, 0x7d, 0x70, 0x6d, 0x9f, 0x50, 0xbc, 0x56, 0xef, 0x12, 0x8d, + 0x98, 0x98, 0x92, 0x4e, 0xcd, 0x30, 0x75, 0xaa, 0xc3, 0x65, 0x5b, 0xb1, 0x86, 0x0d, 0xa5, 0xc6, + 0x14, 0x6b, 0x42, 0x71, 0xe5, 0x4a, 0x57, 0xa1, 0x07, 0xfd, 0xfd, 0x5a, 0x5b, 0xef, 0xd5, 0xbb, + 0x7a, 0x57, 0xaf, 0x73, 0xfd, 0xfd, 0xfe, 0x7d, 0xfe, 0xc5, 0x3f, 0xf8, 0x2f, 0x1b, 0x67, 0x45, + 0xf6, 0x75, 0xd8, 0xd6, 0x4d, 0x52, 0x1f, 0x5c, 0x0b, 0xf7, 0xb5, 0xf2, 0xb2, 0xa7, 0xd3, 0xc3, + 0xed, 0x03, 0x45, 0x23, 0xe6, 0xb0, 0x6e, 0x1c, 0x76, 0x59, 0x83, 0x55, 0xef, 0x11, 0x8a, 0xe3, + 0xac, 0xea, 0x49, 0x56, 0x66, 0x5f, 0xa3, 0x4a, 0x8f, 0x44, 0x0c, 0x5e, 0x9d, 0x64, 0x60, 0xb5, + 0x0f, 0x48, 0x0f, 0x47, 0xec, 0x5e, 0x4a, 0xb2, 0xeb, 0x53, 0x45, 0xad, 0x2b, 0x1a, 0xb5, 0xa8, + 0x19, 0x36, 0x92, 0xff, 0x2b, 0x01, 0xb8, 0xa1, 0x6b, 0xd4, 0xd4, 0x55, 0x95, 0x98, 0x88, 0x0c, + 0x14, 0x4b, 0xd1, 0x35, 0x78, 0x0f, 0x14, 0xd8, 0x78, 0x3a, 0x98, 0xe2, 0xb2, 0x74, 0x4e, 0x5a, + 0x2d, 0xad, 0x5d, 0xad, 0x79, 0x33, 0xed, 0xc2, 0xd7, 0x8c, 0xc3, 0x2e, 0x6b, 0xb0, 0x6a, 0x4c, + 0xbb, 0x36, 0xb8, 0x56, 0xdb, 0xdd, 0x7f, 0x9f, 0xb4, 0xe9, 0x36, 0xa1, 0xb8, 0x01, 0x1f, 0x8e, + 0xaa, 0x27, 0xc6, 0xa3, 0x2a, 0xf0, 0xda, 0x90, 0x8b, 0x0a, 0x77, 0x41, 0x8e, 0xa3, 0x67, 0x38, + 0xfa, 0x95, 0x44, 0x74, 0x31, 0xe8, 0x1a, 0xc2, 0x1f, 0xbc, 0xf5, 0x80, 0x12, 0x8d, 0xb9, 0xd7, + 0x38, 0x25, 0xa0, 0x73, 0x9b, 0x98, 0x62, 0xc4, 0x81, 0xe0, 0x65, 0x50, 0x30, 0x85, 0xfb, 0xe5, + 0xec, 0x39, 0x69, 0x35, 0xdb, 0x38, 0x2d, 0xb4, 0x0a, 0xce, 0xb0, 0x90, 0xab, 0x21, 0x3f, 0x94, + 0xc0, 0x52, 0x74, 0xdc, 0x5b, 0x8a, 0x45, 0xe1, 0x4f, 0x22, 0x63, 0xaf, 0xa5, 0x1b, 0x3b, 0xb3, + 0xe6, 0x23, 0x77, 0x3b, 0x76, 0x5a, 0x7c, 0xe3, 0x6e, 0x82, 0xbc, 0x42, 0x49, 0xcf, 0x2a, 0x67, + 0xce, 0x65, 0x57, 0x4b, 0x6b, 0x97, 0x6a, 0x09, 0x01, 0x5c, 0x8b, 0x7a, 0xd7, 0x98, 0x15, 0xb8, + 0xf9, 0xdb, 0x0c, 0x01, 0xd9, 
0x40, 0xf2, 0xaf, 0x32, 0xa0, 0xb8, 0x89, 0x49, 0x4f, 0xd7, 0x5a, + 0x84, 0x1e, 0xc3, 0xca, 0xdd, 0x02, 0x39, 0xcb, 0x20, 0x6d, 0xb1, 0x72, 0x17, 0x13, 0x07, 0xe0, + 0xfa, 0xd4, 0x32, 0x48, 0xdb, 0x5b, 0x32, 0xf6, 0x85, 0x38, 0x02, 0x6c, 0x82, 0x19, 0x8b, 0x62, + 0xda, 0xb7, 0xf8, 0x82, 0x95, 0xd6, 0x56, 0x53, 0x60, 0x71, 0xfd, 0xc6, 0x9c, 0x40, 0x9b, 0xb1, + 0xbf, 0x91, 0xc0, 0x91, 0xff, 0x99, 0x01, 0xd0, 0xd5, 0xdd, 0xd0, 0xb5, 0x8e, 0x42, 0x59, 0x38, + 0x5f, 0x07, 0x39, 0x3a, 0x34, 0x08, 0x9f, 0x90, 0x62, 0xe3, 0xa2, 0xe3, 0xca, 0x9d, 0xa1, 0x41, + 0x1e, 0x8f, 0xaa, 0x4b, 0x51, 0x0b, 0x26, 0x41, 0xdc, 0x06, 0x6e, 0xb9, 0x4e, 0x66, 0xb8, 0xf5, + 0xcb, 0xc1, 0xae, 0x1f, 0x8f, 0xaa, 0x31, 0x7b, 0x47, 0xcd, 0x45, 0x0a, 0x3a, 0x08, 0x07, 0x00, + 0xaa, 0xd8, 0xa2, 0x77, 0x4c, 0xac, 0x59, 0x76, 0x4f, 0x4a, 0x8f, 0x88, 0xe1, 0xbf, 0x98, 0x6e, + 0xa1, 0x98, 0x45, 0x63, 0x45, 0x78, 0x01, 0xb7, 0x22, 0x68, 0x28, 0xa6, 0x07, 0x78, 0x11, 0xcc, + 0x98, 0x04, 0x5b, 0xba, 0x56, 0xce, 0xf1, 0x51, 0xb8, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8, + 0x02, 0x38, 0xd9, 0x23, 0x96, 0x85, 0xbb, 0xa4, 0x9c, 0xe7, 0x8a, 0xf3, 0x42, 0xf1, 0xe4, 0xb6, + 0xdd, 0x8c, 0x1c, 0xb9, 0xfc, 0x27, 0x09, 0xcc, 0xba, 0x33, 0x77, 0x0c, 0x99, 0x73, 0x33, 0x98, + 0x39, 0xf2, 0xe4, 0x60, 0x49, 0x48, 0x98, 0x8f, 0xb2, 0x3e, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x82, + 0x82, 0x45, 0x54, 0xd2, 0xa6, 0xba, 0x29, 0x1c, 0x7f, 0x29, 0xa5, 0xe3, 0x78, 0x9f, 0xa8, 0x2d, + 0x61, 0xda, 0x38, 0xc5, 0x3c, 0x77, 0xbe, 0x90, 0x0b, 0x09, 0xdf, 0x01, 0x05, 0x4a, 0x7a, 0x86, + 0x8a, 0x29, 0x11, 0x59, 0x73, 0xde, 0xef, 0x3c, 0x8b, 0x19, 0x06, 0xd6, 0xd4, 0x3b, 0x77, 0x84, + 0x1a, 0x4f, 0x19, 0x77, 0x32, 0x9c, 0x56, 0xe4, 0xc2, 0x40, 0x03, 0xcc, 0xf5, 0x8d, 0x0e, 0xd3, + 0xa4, 0x6c, 0x3b, 0xef, 0x0e, 0x45, 0x0c, 0x5d, 0x9d, 0x3c, 0x2b, 0x7b, 0x01, 0xbb, 0xc6, 0x92, + 0xe8, 0x65, 0x2e, 0xd8, 0x8e, 0x42, 0xf8, 0x70, 0x1d, 0xcc, 0xf7, 0x14, 0x0d, 0x11, 0xdc, 0x19, + 0xb6, 0x48, 0x5b, 0xd7, 0x3a, 0x16, 0x0f, 0xa5, 0x7c, 0x63, 0x59, 0x00, 0xcc, 0x6f, 0x07, 0xc5, + 0x28, 0xac, 0x0f, 0xb7, 0xc0, 0xa2, 0xb3, 0x01, 0xdf, 0x52, 0x2c, 0xaa, 0x9b, 0xc3, 0x2d, 0xa5, + 0xa7, 0xd0, 0xf2, 0x0c, 0xc7, 0x29, 0x8f, 0x47, 0xd5, 0x45, 0x14, 0x23, 0x47, 0xb1, 0x56, 0xf2, + 0xef, 0x66, 0xc0, 0x7c, 0x68, 0x5f, 0x80, 0x77, 0xc1, 0x52, 0xbb, 0x6f, 0x9a, 0x44, 0xa3, 0x3b, + 0xfd, 0xde, 0x3e, 0x31, 0x5b, 0xed, 0x03, 0xd2, 0xe9, 0xab, 0xa4, 0xc3, 0x97, 0x35, 0xdf, 0xa8, + 0x08, 0x5f, 0x97, 0x36, 0x62, 0xb5, 0x50, 0x82, 0x35, 0x7c, 0x1b, 0x40, 0x8d, 0x37, 0x6d, 0x2b, + 0x96, 0xe5, 0x62, 0x66, 0x38, 0xa6, 0x9b, 0x8a, 0x3b, 0x11, 0x0d, 0x14, 0x63, 0xc5, 0x7c, 0xec, + 0x10, 0x4b, 0x31, 0x49, 0x27, 0xec, 0x63, 0x36, 0xe8, 0xe3, 0x66, 0xac, 0x16, 0x4a, 0xb0, 0x86, + 0xaf, 0x80, 0x92, 0xdd, 0x1b, 0x9f, 0x73, 0xb1, 0x38, 0x0b, 0x02, 0xac, 0xb4, 0xe3, 0x89, 0x90, + 0x5f, 0x8f, 0x0d, 0x4d, 0xdf, 0xb7, 0x88, 0x39, 0x20, 0x9d, 0x9b, 0x36, 0x39, 0x60, 0x15, 0x34, + 0xcf, 0x2b, 0xa8, 0x3b, 0xb4, 0xdd, 0x88, 0x06, 0x8a, 0xb1, 0x62, 0x43, 0xb3, 0xa3, 0x26, 0x32, + 0xb4, 0x99, 0xe0, 0xd0, 0xf6, 0x62, 0xb5, 0x50, 0x82, 0x35, 0x8b, 0x3d, 0xdb, 0xe5, 0xf5, 0x01, + 0x56, 0x54, 0xbc, 0xaf, 0x92, 0xf2, 0xc9, 0x60, 0xec, 0xed, 0x04, 0xc5, 0x28, 0xac, 0x0f, 0x6f, + 0x82, 0x33, 0x76, 0xd3, 0x9e, 0x86, 0x5d, 0x90, 0x02, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xd9, 0x09, + 0x2b, 0xa0, 0xa8, 0x0d, 0xbc, 0x0e, 0xe6, 0xda, 0xba, 0xaa, 0xf2, 0x78, 0xdc, 0xd0, 0xfb, 0x1a, + 0x2d, 0x17, 0x39, 0x0a, 0x64, 0x39, 0xb4, 0x11, 0x90, 0xa0, 0x90, 0x26, 0xfc, 0x39, 0x00, 0x6d, + 0xa7, 0x30, 0x58, 0x65, 0x30, 0x81, 0x01, 0x44, 0xcb, 
0x92, 0x57, 0x99, 0xdd, 0x26, 0x0b, 0xf9, + 0x20, 0xe5, 0x8f, 0x24, 0xb0, 0x9c, 0x90, 0xe8, 0xf0, 0xcd, 0x40, 0x11, 0xbc, 0x14, 0x2a, 0x82, + 0x67, 0x13, 0xcc, 0x7c, 0x95, 0xf0, 0x00, 0xcc, 0x32, 0x42, 0xa2, 0x68, 0x5d, 0x5b, 0x45, 0xec, + 0x65, 0xf5, 0xc4, 0x01, 0x20, 0xbf, 0xb6, 0xb7, 0x2b, 0x9f, 0x19, 0x8f, 0xaa, 0xb3, 0x01, 0x19, + 0x0a, 0x02, 0xcb, 0xbf, 0xce, 0x00, 0xb0, 0x49, 0x0c, 0x55, 0x1f, 0xf6, 0x88, 0x76, 0x1c, 0x9c, + 0xe6, 0x76, 0x80, 0xd3, 0x3c, 0x9f, 0xbc, 0x24, 0xae, 0x53, 0x89, 0xa4, 0xe6, 0x9d, 0x10, 0xa9, + 0x79, 0x21, 0x0d, 0xd8, 0x93, 0x59, 0xcd, 0xa7, 0x59, 0xb0, 0xe0, 0x29, 0x7b, 0xb4, 0xe6, 0x46, + 0x60, 0x45, 0x9f, 0x0f, 0xad, 0xe8, 0x72, 0x8c, 0xc9, 0x33, 0xe3, 0x35, 0xef, 0x83, 0x39, 0xc6, + 0x3a, 0xec, 0xf5, 0xe3, 0x9c, 0x66, 0x66, 0x6a, 0x4e, 0xe3, 0x56, 0xa2, 0xad, 0x00, 0x12, 0x0a, + 0x21, 0x27, 0x70, 0xa8, 0x93, 0x5f, 0x47, 0x0e, 0xf5, 0x67, 0x09, 0xcc, 0x79, 0xcb, 0x74, 0x0c, + 0x24, 0xea, 0x56, 0x90, 0x44, 0x9d, 0x4f, 0x11, 0x9c, 0x09, 0x2c, 0xea, 0xd3, 0x9c, 0xdf, 0x75, + 0x4e, 0xa3, 0x56, 0xd9, 0x11, 0xcc, 0x50, 0x95, 0x36, 0xb6, 0x44, 0xbd, 0x3d, 0x65, 0x1f, 0xbf, + 0xec, 0x36, 0xe4, 0x4a, 0x03, 0x84, 0x2b, 0xf3, 0x6c, 0x09, 0x57, 0xf6, 0xe9, 0x10, 0xae, 0x1f, + 0x83, 0x82, 0xe5, 0x50, 0xad, 0x1c, 0x87, 0xbc, 0x94, 0x2a, 0xb1, 0x05, 0xcb, 0x72, 0xa1, 0x5d, + 0x7e, 0xe5, 0xc2, 0xc5, 0x31, 0xab, 0xfc, 0x97, 0xc9, 0xac, 0x58, 0xa0, 0x1b, 0xb8, 0x6f, 0x91, + 0x0e, 0x4f, 0xaa, 0x82, 0x17, 0xe8, 0x4d, 0xde, 0x8a, 0x84, 0x14, 0xee, 0x81, 0x65, 0xc3, 0xd4, + 0xbb, 0x26, 0xb1, 0xac, 0x4d, 0x82, 0x3b, 0xaa, 0xa2, 0x11, 0x67, 0x00, 0x76, 0x4d, 0x3c, 0x3b, + 0x1e, 0x55, 0x97, 0x9b, 0xf1, 0x2a, 0x28, 0xc9, 0x56, 0xfe, 0x5b, 0x0e, 0x9c, 0x0e, 0xef, 0x8d, + 0x09, 0x34, 0x45, 0x3a, 0x12, 0x4d, 0xb9, 0xec, 0x8b, 0x53, 0x9b, 0xc3, 0xf9, 0xae, 0x0a, 0x22, + 0xb1, 0xba, 0x0e, 0xe6, 0x05, 0x2d, 0x71, 0x84, 0x82, 0xa8, 0xb9, 0xcb, 0xb3, 0x17, 0x14, 0xa3, + 0xb0, 0x3e, 0xbc, 0x01, 0x66, 0x4d, 0xce, 0xbc, 0x1c, 0x00, 0x9b, 0xbd, 0x7c, 0x47, 0x00, 0xcc, + 0x22, 0xbf, 0x10, 0x05, 0x75, 0x19, 0x73, 0xf1, 0x08, 0x89, 0x03, 0x90, 0x0b, 0x32, 0x97, 0xf5, + 0xb0, 0x02, 0x8a, 0xda, 0xc0, 0x6d, 0xb0, 0xd0, 0xd7, 0xa2, 0x50, 0x76, 0xac, 0x9d, 0x15, 0x50, + 0x0b, 0x7b, 0x51, 0x15, 0x14, 0x67, 0x07, 0xef, 0x05, 0xc8, 0xcc, 0x0c, 0xdf, 0x4f, 0x2e, 0xa7, + 0xc8, 0x89, 0xd4, 0x6c, 0x26, 0x86, 0x6a, 0x15, 0xd2, 0x52, 0x2d, 0xf9, 0x43, 0x09, 0xc0, 0x68, + 0x1e, 0x4e, 0xbc, 0x09, 0x88, 0x58, 0xf8, 0x2a, 0xa6, 0x12, 0xcf, 0x7f, 0xae, 0xa6, 0xe4, 0x3f, + 0xde, 0x86, 0x9a, 0x8e, 0x00, 0x89, 0x89, 0x3e, 0x9e, 0x4b, 0x9d, 0xb4, 0x04, 0xc8, 0x73, 0xea, + 0x29, 0x10, 0x20, 0x1f, 0xd8, 0x93, 0x09, 0xd0, 0xbf, 0x32, 0x60, 0xc1, 0x53, 0x4e, 0x4d, 0x80, + 0x62, 0x4c, 0xbe, 0xbd, 0xd8, 0x49, 0x47, 0x4a, 0xbc, 0xa9, 0xfb, 0x2a, 0x91, 0x12, 0xcf, 0xab, + 0x04, 0x52, 0xf2, 0x87, 0x8c, 0xdf, 0xf5, 0x29, 0x49, 0xc9, 0x53, 0xb8, 0xe1, 0xf8, 0xda, 0xf1, + 0x1a, 0xf9, 0xe3, 0x2c, 0x38, 0x1d, 0xce, 0xc3, 0x40, 0x81, 0x94, 0x26, 0x16, 0xc8, 0x26, 0x58, + 0xbc, 0xdf, 0x57, 0xd5, 0x21, 0x1f, 0x83, 0xaf, 0x4a, 0xda, 0xa5, 0xf5, 0xbb, 0xc2, 0x72, 0xf1, + 0x47, 0x31, 0x3a, 0x28, 0xd6, 0x32, 0x5a, 0x2f, 0x73, 0x5f, 0xb4, 0x5e, 0xe6, 0x8f, 0x50, 0x2f, + 0xe3, 0x29, 0x47, 0xf6, 0x48, 0x94, 0x63, 0xba, 0x62, 0x19, 0xb3, 0x71, 0x4d, 0x3c, 0xfa, 0x8f, + 0x25, 0xb0, 0x14, 0x7f, 0xe0, 0x86, 0x2a, 0x98, 0xeb, 0xe1, 0x07, 0xfe, 0x8b, 0x8f, 0x49, 0x45, + 0xa4, 0x4f, 0x15, 0xb5, 0x66, 0x3f, 0x19, 0xd5, 0x6e, 0x6b, 0x74, 0xd7, 0x6c, 0x51, 0x53, 0xd1, + 0xba, 0x76, 0xe5, 0xdd, 0x0e, 0x60, 0xa1, 0x10, 0x36, 0x7c, 0x0f, 0x14, 0x7a, 
0xf8, 0x41, 0xab, + 0x6f, 0x76, 0xe3, 0x2a, 0x64, 0xba, 0x7e, 0x78, 0x02, 0x6c, 0x0b, 0x14, 0xe4, 0xe2, 0xc9, 0x9f, + 0x4b, 0x60, 0x39, 0xa1, 0xaa, 0x7e, 0x83, 0x46, 0xf9, 0x57, 0x09, 0x9c, 0x0b, 0x8c, 0x92, 0xa5, + 0x25, 0xb9, 0xdf, 0x57, 0x79, 0x86, 0x0a, 0x26, 0x73, 0x09, 0x14, 0x0d, 0x6c, 0x52, 0xc5, 0xe5, + 0xc1, 0xf9, 0xc6, 0xec, 0x78, 0x54, 0x2d, 0x36, 0x9d, 0x46, 0xe4, 0xc9, 0x63, 0xe6, 0x26, 0xf3, + 0xec, 0xe6, 0x46, 0xfe, 0x9f, 0x04, 0xf2, 0xad, 0x36, 0x56, 0xc9, 0x31, 0x10, 0x97, 0xcd, 0x00, + 0x71, 0x49, 0x7e, 0x14, 0xe0, 0xfe, 0x24, 0x72, 0x96, 0xad, 0x10, 0x67, 0xb9, 0x30, 0x01, 0xe7, + 0xc9, 0x74, 0xe5, 0x75, 0x50, 0x74, 0xbb, 0x9b, 0x6e, 0x2f, 0x95, 0x7f, 0x9f, 0x01, 0x25, 0x5f, + 0x17, 0x53, 0xee, 0xc4, 0xf7, 0x02, 0xe5, 0x87, 0xed, 0x31, 0x6b, 0x69, 0x06, 0x52, 0x73, 0x4a, + 0xcd, 0x5b, 0x1a, 0x35, 0xfd, 0x67, 0xd5, 0x68, 0x05, 0x7a, 0x03, 0xcc, 0x51, 0x6c, 0x76, 0x09, + 0x75, 0x64, 0x7c, 0xc2, 0x8a, 0xde, 0xdd, 0xcd, 0x9d, 0x80, 0x14, 0x85, 0xb4, 0x57, 0x6e, 0x80, + 0xd9, 0x40, 0x67, 0xf0, 0x34, 0xc8, 0x1e, 0x92, 0xa1, 0xcd, 0xe0, 0x10, 0xfb, 0x09, 0x17, 0x41, + 0x7e, 0x80, 0xd5, 0xbe, 0x1d, 0xa2, 0x45, 0x64, 0x7f, 0x5c, 0xcf, 0xbc, 0x26, 0xc9, 0xbf, 0x61, + 0x93, 0xe3, 0xa5, 0xc2, 0x31, 0x44, 0xd7, 0xdb, 0x81, 0xe8, 0x4a, 0x7e, 0x9f, 0xf4, 0x27, 0x68, + 0x52, 0x8c, 0xa1, 0x50, 0x8c, 0xbd, 0x98, 0x0a, 0xed, 0xc9, 0x91, 0xf6, 0xef, 0x0c, 0x58, 0xf4, + 0x69, 0x7b, 0xcc, 0xf8, 0xfb, 0x01, 0x66, 0xbc, 0x1a, 0x62, 0xc6, 0xe5, 0x38, 0x9b, 0x6f, 0xa9, + 0xf1, 0x64, 0x6a, 0xfc, 0x17, 0x09, 0xcc, 0xfb, 0xe6, 0xee, 0x18, 0xb8, 0xf1, 0xed, 0x20, 0x37, + 0xbe, 0x90, 0x26, 0x68, 0x12, 0xc8, 0xf1, 0x75, 0xb0, 0xe0, 0x53, 0xda, 0x35, 0x3b, 0x8a, 0x86, + 0x55, 0x0b, 0x9e, 0x07, 0x79, 0x8b, 0x62, 0x93, 0x3a, 0x45, 0xc4, 0xb1, 0x6d, 0xb1, 0x46, 0x64, + 0xcb, 0xe4, 0xff, 0x48, 0xa0, 0xee, 0x33, 0x6e, 0x12, 0xd3, 0x52, 0x2c, 0x4a, 0x34, 0x7a, 0x57, + 0x57, 0xfb, 0x3d, 0xb2, 0xa1, 0x62, 0xa5, 0x87, 0x08, 0x6b, 0x50, 0x74, 0xad, 0xa9, 0xab, 0x4a, + 0x7b, 0x08, 0x31, 0x28, 0x7d, 0x70, 0x40, 0xb4, 0x4d, 0xa2, 0x12, 0x2a, 0x5e, 0xe0, 0x8a, 0x8d, + 0x37, 0x9d, 0x07, 0xa9, 0x77, 0x3d, 0xd1, 0xe3, 0x51, 0x75, 0x35, 0x0d, 0x22, 0x8f, 0x50, 0x3f, + 0x26, 0xfc, 0x19, 0x00, 0xec, 0x93, 0xef, 0x65, 0x1d, 0x11, 0xac, 0x6f, 0x38, 0x19, 0xfd, 0xae, + 0x2b, 0x99, 0xaa, 0x03, 0x1f, 0xa2, 0xfc, 0xc7, 0x42, 0x60, 0xbd, 0xbf, 0xf1, 0xb7, 0x9c, 0xbf, + 0x00, 0x8b, 0x03, 0x6f, 0x76, 0x1c, 0x05, 0xc6, 0xbf, 0xb3, 0xe1, 0x93, 0xbc, 0x0b, 0x1f, 0x37, + 0xaf, 0x1e, 0xeb, 0xbf, 0x1b, 0x03, 0x87, 0x62, 0x3b, 0x81, 0xaf, 0x80, 0x12, 0xe3, 0xcd, 0x4a, + 0x9b, 0xec, 0xe0, 0x9e, 0x93, 0x8b, 0xee, 0x03, 0x66, 0xcb, 0x13, 0x21, 0xbf, 0x1e, 0x3c, 0x00, + 0x0b, 0x86, 0xde, 0xd9, 0xc6, 0x1a, 0xee, 0x12, 0x46, 0x04, 0xed, 0xa5, 0xe4, 0x57, 0x9f, 0xc5, + 0xc6, 0xab, 0xce, 0xb5, 0x56, 0x33, 0xaa, 0xf2, 0x78, 0x54, 0x5d, 0x8e, 0x69, 0xe6, 0x41, 0x10, + 0x07, 0x09, 0xcd, 0xc8, 0xa3, 0xbb, 0xfd, 0xe8, 0xb0, 0x96, 0x26, 0x29, 0x8f, 0xf8, 0xec, 0x9e, + 0x74, 0xb3, 0x5b, 0x38, 0xd2, 0xcd, 0x6e, 0xcc, 0x11, 0xb7, 0x38, 0xe5, 0x11, 0xf7, 0x63, 0x09, + 0x5c, 0x30, 0x52, 0xe4, 0x52, 0x19, 0xf0, 0xb9, 0xb9, 0x95, 0x66, 0x6e, 0xd2, 0xe4, 0x66, 0x63, + 0x75, 0x3c, 0xaa, 0x5e, 0x48, 0xa3, 0x89, 0x52, 0xf9, 0x07, 0xef, 0x82, 0x82, 0x2e, 0xf6, 0xc0, + 0x72, 0x89, 0xfb, 0x7a, 0x39, 0x8d, 0xaf, 0xce, 0xbe, 0x69, 0xa7, 0xa5, 0xf3, 0x85, 0x5c, 0x2c, + 0xf9, 0xc3, 0x3c, 0x38, 0x13, 0xa9, 0xe0, 0x5f, 0xe2, 0xfd, 0x75, 0xe4, 0x30, 0x9d, 0x9d, 0xe2, + 0x30, 0xbd, 0x0e, 0xe6, 0xc5, 0x5f, 0x22, 0x42, 0x67, 0x71, 0x37, 0x60, 0x36, 0x82, 0x62, 0x14, + 0xd6, 
0x8f, 0xbb, 0x3f, 0xcf, 0x4f, 0x79, 0x7f, 0xee, 0xf7, 0x42, 0xfc, 0xc5, 0xcf, 0x4e, 0xef, + 0xa8, 0x17, 0xe2, 0x9f, 0x7e, 0x61, 0x7d, 0x46, 0x5c, 0x6d, 0x54, 0x17, 0xe1, 0x64, 0x90, 0xb8, + 0xee, 0x05, 0xa4, 0x28, 0xa4, 0xfd, 0x85, 0x9e, 0xfd, 0x71, 0xcc, 0xb3, 0xff, 0x95, 0x34, 0xb1, + 0x96, 0xfe, 0xaa, 0x3c, 0xf6, 0xd2, 0xa3, 0x34, 0xfd, 0xa5, 0x87, 0xfc, 0x77, 0x09, 0x3c, 0x97, + 0xb8, 0x6b, 0xc1, 0xf5, 0x00, 0xad, 0xbc, 0x12, 0xa2, 0x95, 0xdf, 0x4b, 0x34, 0xf4, 0x71, 0x4b, + 0x33, 0xfe, 0x16, 0xfd, 0xf5, 0x74, 0xb7, 0xe8, 0x31, 0x27, 0xe1, 0xc9, 0xd7, 0xe9, 0x8d, 0x1f, + 0x3c, 0x7c, 0x54, 0x39, 0xf1, 0xc9, 0xa3, 0xca, 0x89, 0xcf, 0x1e, 0x55, 0x4e, 0xfc, 0x72, 0x5c, + 0x91, 0x1e, 0x8e, 0x2b, 0xd2, 0x27, 0xe3, 0x8a, 0xf4, 0xd9, 0xb8, 0x22, 0xfd, 0x63, 0x5c, 0x91, + 0x7e, 0xfb, 0x79, 0xe5, 0xc4, 0x7b, 0xcb, 0x09, 0x7f, 0x3a, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd1, 0xcb, 0x7d, 0xc7, 0xa7, 0x2c, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2573,6 +2605,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2626,6 +2684,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -3331,6 +3401,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -3379,6 +3459,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3837,6 +3921,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { if this == nil { return "nil" @@ -3868,6 +3962,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", 
this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -8393,6 +8488,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8831,6 +8995,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index b5bc8c0ca..ddbe35441 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -131,6 +131,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
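// (Aside, not part of the upstream proto: the tag bytes hard-coded in the
// generated Go code above follow the standard protobuf rule
// tag = (field_number << 3) | wire_type. The new StatefulSetOrdinals.start
// field (field 1, varint) therefore encodes as (1 << 3) | 0 = 0x08, and
// StatefulSetSpec.ordinals (field 11, length-delimited message) encodes as
// (11 << 3) | 2 = 0x5a, matching the dAtA[i] = 0x8 and dAtA[i] = 0x5a writes
// in the Marshal functions.)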
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -282,6 +283,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -455,7 +457,7 @@ message ReplicaSetSpec { // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -519,7 +521,6 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -601,7 +602,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -620,8 +621,9 @@ message ScaleStatus { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { @@ -667,6 +669,21 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { @@ -702,7 +719,10 @@ message StatefulSetSpec { // template is the object that describes the pod that will be created if // insufficient replicas are detected. 
Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -747,7 +767,6 @@ message StatefulSetSpec { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -756,6 +775,14 @@ message StatefulSetSpec { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -800,7 +827,6 @@ message StatefulSetStatus { repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 6c2448869..a97ac6fcf 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -43,7 +43,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -94,8 +94,9 @@ type Scale struct { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. 
type StatefulSet struct { @@ -213,6 +214,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -231,7 +247,10 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -276,7 +295,6 @@ type StatefulSetSpec struct { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -285,6 +303,14 @@ type StatefulSetSpec struct { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. Using + // the ordinals field requires the StatefulSetStartOrdinal feature gate to be + // enabled, which is beta. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -329,7 +355,6 @@ type StatefulSetStatus struct { Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. 
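// A minimal, self-contained sketch (not part of the vendored types.go) of how
// the ordinals field added in this diff might be used from client code. Only
// the k8s.io/api/apps/v1beta2 types shown above come from the source; the
// "web" StatefulSet name, the replica count, and the start index are
// illustrative assumptions.
//
//	package main
//
//	import (
//		"fmt"
//
//		appsv1beta2 "k8s.io/api/apps/v1beta2"
//	)
//
//	func main() {
//		replicas := int32(3)
//		spec := appsv1beta2.StatefulSetSpec{
//			Replicas:    &replicas,
//			ServiceName: "web",
//			// With ordinals.start = 1, replica indices fall in
//			// [start, start+replicas) = [1, 4): pods web-1, web-2, web-3.
//			Ordinals: &appsv1beta2.StatefulSetOrdinals{Start: 1},
//		}
//		fmt.Printf("first pod: web-%d, last pod: web-%d\n",
//			spec.Ordinals.Start, spec.Ordinals.Start+*spec.Replicas-1)
//	}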
- // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -405,6 +430,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -650,7 +676,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -667,6 +692,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` @@ -901,7 +927,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index ef1de63b2..d7e920991 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -248,7 +248,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. 
Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. 
Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { @@ -313,7 +313,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -322,7 +322,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } @@ -352,6 +352,15 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. 
Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", @@ -366,14 +375,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. 
The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -391,7 +401,7 @@ var map_StatefulSetStatus = map[string]string{ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 118fd3809..cd92792db 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -760,6 +760,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. 
+func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -808,6 +824,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index efbecf02c..304bbd074 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -102,10 +102,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{2} + return fileDescriptor_2953ea822e7ffe1e, []int{4} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +189,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } func (*TokenRequestSpec) ProtoMessage() {} func (*TokenRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{3} + 
return fileDescriptor_2953ea822e7ffe1e, []int{5} } func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +217,7 @@ var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } func (*TokenRequestStatus) ProtoMessage() {} func (*TokenRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{4} + return fileDescriptor_2953ea822e7ffe1e, []int{6} } func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +245,7 @@ var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{5} + return fileDescriptor_2953ea822e7ffe1e, []int{7} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +273,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{6} + return fileDescriptor_2953ea822e7ffe1e, []int{8} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +301,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{7} + return fileDescriptor_2953ea822e7ffe1e, []int{9} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +329,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{8} + return fileDescriptor_2953ea822e7ffe1e, []int{10} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,6 +357,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus") proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") @@ -316,64 +374,67 @@ func init() { } var fileDescriptor_2953ea822e7ffe1e = []byte{ - // 907 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x96, 0xa4, 0x78, 0x25, - 0x54, 0x01, 0x6b, 0x6f, 0x23, 0x04, 0xab, 0x45, 0x42, 0xaa, 0x69, 0x04, 0x11, 0x82, 0x5d, 0xcd, - 0x6e, 0x0b, 0xe2, 0xc4, 0xc4, 0x7e, 0x4d, 0x87, 0xe0, 0xb1, 0xb1, 0xc7, 0x61, 0x73, 0xdb, 0x3f, - 0x81, 0x23, 0x48, 0x1c, 0xf8, 0x23, 0x90, 
0xf8, 0x17, 0x7a, 0x5c, 0x71, 0xea, 0x01, 0x45, 0xd4, - 0x5c, 0x39, 0x72, 0xe2, 0x84, 0x66, 0x3c, 0xad, 0xe3, 0xa4, 0x4d, 0x73, 0xe2, 0x96, 0x79, 0xef, - 0x7b, 0xdf, 0xbc, 0xf7, 0xcd, 0xe7, 0x99, 0xa0, 0xde, 0xe8, 0x61, 0x6c, 0xb1, 0xc0, 0x1e, 0x25, - 0x03, 0x88, 0x38, 0x08, 0x88, 0xed, 0x31, 0x70, 0x2f, 0x88, 0x6c, 0x9d, 0xa0, 0x21, 0xb3, 0x69, - 0x22, 0x4e, 0x80, 0x0b, 0xe6, 0x52, 0xc1, 0x02, 0x6e, 0x8f, 0xf7, 0xec, 0x21, 0x70, 0x88, 0xa8, - 0x00, 0xcf, 0x0a, 0xa3, 0x40, 0x04, 0xf8, 0x6e, 0x86, 0xb6, 0x68, 0xc8, 0xac, 0x22, 0xda, 0x1a, - 0xef, 0x6d, 0xdf, 0x1f, 0x32, 0x71, 0x92, 0x0c, 0x2c, 0x37, 0xf0, 0xed, 0x61, 0x30, 0x0c, 0x6c, - 0x55, 0x34, 0x48, 0x8e, 0xd5, 0x4a, 0x2d, 0xd4, 0xaf, 0x8c, 0x6c, 0xfb, 0xdd, 0x7c, 0x6b, 0x9f, - 0xba, 0x27, 0x8c, 0x43, 0x34, 0xb1, 0xc3, 0xd1, 0x50, 0x06, 0x62, 0xdb, 0x07, 0x41, 0xaf, 0x68, - 0x61, 0xdb, 0xbe, 0xae, 0x2a, 0x4a, 0xb8, 0x60, 0x3e, 0x2c, 0x14, 0xbc, 0x77, 0x53, 0x41, 0xec, - 0x9e, 0x80, 0x4f, 0xe7, 0xeb, 0xcc, 0xdf, 0x0d, 0xf4, 0xaa, 0x13, 0x24, 0xdc, 0x7b, 0x3c, 0xf8, - 0x06, 0x5c, 0x41, 0xe0, 0x18, 0x22, 0xe0, 0x2e, 0xe0, 0x1d, 0x54, 0x1d, 0x31, 0xee, 0xb5, 0x8c, - 0x1d, 0x63, 0xb7, 0xe1, 0xdc, 0x3a, 0x9d, 0x76, 0x4a, 0xe9, 0xb4, 0x53, 0xfd, 0x94, 0x71, 0x8f, - 0xa8, 0x0c, 0xee, 0x22, 0x44, 0x43, 0x76, 0x04, 0x51, 0xcc, 0x02, 0xde, 0x2a, 0x2b, 0x1c, 0xd6, - 0x38, 0xb4, 0xff, 0xa4, 0xaf, 0x33, 0x64, 0x06, 0x25, 0x59, 0x39, 0xf5, 0xa1, 0x55, 0x29, 0xb2, - 0x7e, 0x4e, 0x7d, 0x20, 0x2a, 0x83, 0x1d, 0x54, 0x49, 0xfa, 0x07, 0xad, 0xaa, 0x02, 0x3c, 0xd0, - 0x80, 0xca, 0x61, 0xff, 0xe0, 0xdf, 0x69, 0xe7, 0x8d, 0xeb, 0x86, 0x14, 0x93, 0x10, 0x62, 0xeb, - 0xb0, 0x7f, 0x40, 0x64, 0xb1, 0xf9, 0x3e, 0x42, 0xbd, 0xe7, 0x22, 0xa2, 0x47, 0xf4, 0xdb, 0x04, - 0x70, 0x07, 0xd5, 0x98, 0x00, 0x3f, 0x6e, 0x19, 0x3b, 0x95, 0xdd, 0x86, 0xd3, 0x48, 0xa7, 0x9d, - 0x5a, 0x5f, 0x06, 0x48, 0x16, 0x7f, 0x54, 0xff, 0xf1, 0x97, 0x4e, 0xe9, 0xc5, 0x1f, 0x3b, 0x25, - 0xf3, 0xe7, 0x32, 0xba, 0xf5, 0x2c, 0x18, 0x01, 0x27, 0xf0, 0x5d, 0x02, 0xb1, 0xc0, 0x5f, 0xa3, - 0xba, 0x3c, 0x22, 0x8f, 0x0a, 0xaa, 0x94, 0x68, 0x76, 0x1f, 0x58, 0xb9, 0x3b, 0x2e, 0x9b, 0xb0, - 0xc2, 0xd1, 0x50, 0x06, 0x62, 0x4b, 0xa2, 0xad, 0xf1, 0x9e, 0x95, 0xc9, 0xf9, 0x19, 0x08, 0x9a, - 0x6b, 0x92, 0xc7, 0xc8, 0x25, 0x2b, 0x7e, 0x82, 0xaa, 0x71, 0x08, 0xae, 0xd2, 0xaf, 0xd9, 0xb5, - 0xac, 0x65, 0xde, 0xb3, 0x66, 0x7b, 0x7b, 0x1a, 0x82, 0x9b, 0x2b, 0x28, 0x57, 0x44, 0x31, 0xe1, - 0x2f, 0xd1, 0x5a, 0x2c, 0xa8, 0x48, 0x62, 0xa5, 0x72, 0xb1, 0xe3, 0x9b, 0x38, 0x55, 0x9d, 0xb3, - 0xa1, 0x59, 0xd7, 0xb2, 0x35, 0xd1, 0x7c, 0xe6, 0x3f, 0x06, 0xda, 0x9c, 0x6f, 0x01, 0xbf, 0x8d, - 0x1a, 0x34, 0xf1, 0x98, 0x34, 0xcd, 0x85, 0xc4, 0xeb, 0xe9, 0xb4, 0xd3, 0xd8, 0xbf, 0x08, 0x92, - 0x3c, 0x8f, 0x3f, 0x42, 0x5b, 0xf0, 0x3c, 0x64, 0x91, 0xda, 0xfd, 0x29, 0xb8, 0x01, 0xf7, 0x62, - 0x75, 0xd6, 0x15, 0xe7, 0x4e, 0x3a, 0xed, 0x6c, 0xf5, 0xe6, 0x93, 0x64, 0x11, 0x8f, 0x39, 0xda, - 0x18, 0x14, 0x2c, 0xab, 0x07, 0xed, 0x2e, 0x1f, 0xf4, 0x2a, 0x9b, 0x3b, 0x38, 0x9d, 0x76, 0x36, - 0x8a, 0x19, 0x32, 0xc7, 0x6e, 0xfe, 0x6a, 0x20, 0xbc, 0xa8, 0x12, 0xbe, 0x87, 0x6a, 0x42, 0x46, - 0xf5, 0x27, 0xb2, 0xae, 0x45, 0xab, 0x65, 0xd0, 0x2c, 0x87, 0x27, 0xe8, 0x76, 0x3e, 0xc0, 0x33, - 0xe6, 0x43, 0x2c, 0xa8, 0x1f, 0xea, 0xd3, 0x7e, 0x6b, 0x35, 0x2f, 0xc9, 0x32, 0xe7, 0x35, 0x4d, - 0x7f, 0xbb, 0xb7, 0x48, 0x47, 0xae, 0xda, 0xc3, 0xfc, 0xa9, 0x8c, 0x9a, 0xba, 0xed, 0x31, 0x83, - 0xef, 0xff, 0x07, 0x2f, 0x3f, 0x2e, 0x78, 0xf9, 0xfe, 0x4a, 0xbe, 0x93, 0xad, 0x5d, 0x6b, 0xe5, - 0x2f, 0xe6, 0xac, 0x6c, 0xaf, 0x4e, 0xb9, 0xdc, 0xc9, 0x2e, 0x7a, 
0x65, 0x6e, 0xff, 0xd5, 0x8e, - 0xb3, 0x60, 0xf6, 0xf2, 0x72, 0xb3, 0x9b, 0x7f, 0x1b, 0x68, 0x6b, 0xa1, 0x25, 0xfc, 0x01, 0x5a, - 0x9f, 0xe9, 0x1c, 0xb2, 0x1b, 0xb6, 0xee, 0xdc, 0xd1, 0xfb, 0xad, 0xef, 0xcf, 0x26, 0x49, 0x11, - 0x8b, 0x3f, 0x41, 0xd5, 0x24, 0x86, 0x48, 0x2b, 0xfc, 0xe6, 0x72, 0x39, 0x0e, 0x63, 0x88, 0xfa, - 0xfc, 0x38, 0xc8, 0xa5, 0x95, 0x11, 0xa2, 0x18, 0x8a, 0x93, 0x54, 0x6f, 0xf8, 0x6c, 0xef, 0xa1, - 0x1a, 0x44, 0x51, 0x10, 0xe9, 0x7b, 0xfb, 0x52, 0x9b, 0x9e, 0x0c, 0x92, 0x2c, 0x67, 0xfe, 0x56, - 0x46, 0xf5, 0x8b, 0x2d, 0xf1, 0x3b, 0xa8, 0x2e, 0xb7, 0x51, 0x97, 0x7d, 0x26, 0xe8, 0xa6, 0x2e, - 0x52, 0x18, 0x19, 0x27, 0x97, 0x08, 0xfc, 0x3a, 0xaa, 0x24, 0xcc, 0xd3, 0x6f, 0x48, 0x73, 0xe6, - 0xd2, 0x27, 0x32, 0x8e, 0x4d, 0xb4, 0x36, 0x8c, 0x82, 0x24, 0x94, 0x36, 0x90, 0x8d, 0x22, 0x79, - 0xa2, 0x1f, 0xab, 0x08, 0xd1, 0x19, 0x7c, 0x84, 0x6a, 0x20, 0xef, 0x7c, 0x35, 0x4b, 0xb3, 0xbb, - 0xb7, 0x9a, 0x34, 0x96, 0x7a, 0x27, 0x7a, 0x5c, 0x44, 0x93, 0x99, 0xa9, 0x64, 0x8c, 0x64, 0x74, - 0xdb, 0x03, 0xfd, 0x96, 0x28, 0x0c, 0xde, 0x44, 0x95, 0x11, 0x4c, 0xb2, 0x89, 0x88, 0xfc, 0x89, - 0x3f, 0x44, 0xb5, 0xb1, 0x7c, 0x66, 0xf4, 0x91, 0xec, 0x2e, 0xdf, 0x37, 0x7f, 0x96, 0x48, 0x56, - 0xf6, 0xa8, 0xfc, 0xd0, 0x70, 0x9c, 0xd3, 0xf3, 0x76, 0xe9, 0xe5, 0x79, 0xbb, 0x74, 0x76, 0xde, - 0x2e, 0xbd, 0x48, 0xdb, 0xc6, 0x69, 0xda, 0x36, 0x5e, 0xa6, 0x6d, 0xe3, 0x2c, 0x6d, 0x1b, 0x7f, - 0xa6, 0x6d, 0xe3, 0x87, 0xbf, 0xda, 0xa5, 0xaf, 0xee, 0x2e, 0xfb, 0x13, 0xf3, 0x5f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x12, 0xb8, 0x31, 0x91, 0xfc, 0x08, 0x00, 0x00, + // 958 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0x45, + 0x10, 0xf6, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0x48, 0x7a, 0x59, 0x61, 0x85, 0xc5, 0x0e, 0xb3, 0x12, + 0x8a, 0x80, 0x9d, 0xd9, 0x58, 0x3c, 0x56, 0x8b, 0x84, 0x94, 0x21, 0x16, 0x58, 0x08, 0x76, 0xd5, + 0x4e, 0x02, 0x42, 0x42, 0xa2, 0x3d, 0xae, 0x38, 0x83, 0x77, 0x1e, 0xcc, 0xf4, 0x98, 0xf5, 0x6d, + 0x7f, 0x02, 0x47, 0x90, 0x38, 0xf0, 0x23, 0x90, 0xf8, 0x0b, 0x39, 0xae, 0x10, 0x87, 0x3d, 0x20, + 0x8b, 0x0c, 0x57, 0x8e, 0x9c, 0x38, 0xa1, 0xee, 0xe9, 0xf8, 0x99, 0x4c, 0x7c, 0xda, 0x9b, 0xa7, + 0x1e, 0x5f, 0x55, 0x7d, 0x55, 0x5d, 0x65, 0x68, 0x0d, 0xee, 0x47, 0x86, 0xe3, 0x9b, 0x83, 0xb8, + 0x8b, 0xa1, 0x87, 0x1c, 0x23, 0x73, 0x88, 0x5e, 0xcf, 0x0f, 0x4d, 0xa5, 0x60, 0x81, 0x63, 0xb2, + 0x98, 0x9f, 0xa2, 0xc7, 0x1d, 0x9b, 0x71, 0xc7, 0xf7, 0xcc, 0xe1, 0x9e, 0xd9, 0x47, 0x0f, 0x43, + 0xc6, 0xb1, 0x67, 0x04, 0xa1, 0xcf, 0x7d, 0x72, 0x3b, 0xb5, 0x36, 0x58, 0xe0, 0x18, 0xf3, 0xd6, + 0xc6, 0x70, 0x6f, 0xfb, 0x6e, 0xdf, 0xe1, 0xa7, 0x71, 0xd7, 0xb0, 0x7d, 0xd7, 0xec, 0xfb, 0x7d, + 0xdf, 0x94, 0x4e, 0xdd, 0xf8, 0x44, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0x05, 0xdb, 0x7e, 0x67, 0x1a, + 0xda, 0x65, 0xf6, 0xa9, 0xe3, 0x61, 0x38, 0x32, 0x83, 0x41, 0x5f, 0x08, 0x22, 0xd3, 0x45, 0xce, + 0x2e, 0x49, 0x61, 0xdb, 0xbc, 0xca, 0x2b, 0x8c, 0x3d, 0xee, 0xb8, 0xb8, 0xe4, 0xf0, 0xde, 0x75, + 0x0e, 0x91, 0x7d, 0x8a, 0x2e, 0x5b, 0xf4, 0xd3, 0x7f, 0xd7, 0xe0, 0x65, 0xcb, 0x8f, 0xbd, 0xde, + 0xc3, 0xee, 0xb7, 0x68, 0x73, 0x8a, 0x27, 0x18, 0xa2, 0x67, 0x23, 0xd9, 0x81, 0xe2, 0xc0, 0xf1, + 0x7a, 0x35, 0x6d, 0x47, 0xdb, 0xad, 0x58, 0x37, 0xce, 0xc6, 0x8d, 0x5c, 0x32, 0x6e, 0x14, 0x3f, + 0x75, 0xbc, 0x1e, 0x95, 0x1a, 0xd2, 0x04, 0x60, 0x81, 0x73, 0x8c, 0x61, 0xe4, 0xf8, 0x5e, 0x2d, + 0x2f, 0xed, 0x88, 0xb2, 0x83, 0xfd, 0x47, 0x6d, 0xa5, 0xa1, 0x33, 0x56, 0x02, 0xd5, 0x63, 0x2e, + 0xd6, 0x0a, 0xf3, 0xa8, 0x9f, 0x33, 0x17, 0xa9, 0xd4, 0x10, 0x0b, 0x0a, 
0x71, 0xfb, 0xa0, 0x56, + 0x94, 0x06, 0xf7, 0x94, 0x41, 0xe1, 0xa8, 0x7d, 0xf0, 0xdf, 0xb8, 0xf1, 0xfa, 0x55, 0x45, 0xf2, + 0x51, 0x80, 0x91, 0x71, 0xd4, 0x3e, 0xa0, 0xc2, 0x59, 0x7f, 0x1f, 0xa0, 0xf5, 0x84, 0x87, 0xec, + 0x98, 0x3d, 0x8e, 0x91, 0x34, 0xa0, 0xe4, 0x70, 0x74, 0xa3, 0x9a, 0xb6, 0x53, 0xd8, 0xad, 0x58, + 0x95, 0x64, 0xdc, 0x28, 0xb5, 0x85, 0x80, 0xa6, 0xf2, 0x07, 0xe5, 0x1f, 0x7f, 0x69, 0xe4, 0x9e, + 0xfe, 0xb9, 0x93, 0xd3, 0xff, 0xd0, 0x60, 0xab, 0x83, 0x8f, 0x4f, 0x3a, 0xb1, 0x62, 0x63, 0xe8, + 0xe0, 0xf7, 0xe4, 0x1b, 0x28, 0x8b, 0x3e, 0xf5, 0x18, 0x67, 0x92, 0x8e, 0x6a, 0xf3, 0x9e, 0x31, + 0x1d, 0x91, 0x49, 0x26, 0x46, 0x30, 0xe8, 0x0b, 0x41, 0x64, 0x08, 0x6b, 0x63, 0xb8, 0x67, 0xa4, + 0x9c, 0x7e, 0x86, 0x9c, 0x4d, 0x89, 0x99, 0xca, 0xe8, 0x04, 0x95, 0x7c, 0x0d, 0x6b, 0x11, 0x67, + 0x3c, 0x8e, 0x24, 0x8d, 0xd5, 0xe6, 0xbb, 0x46, 0xd6, 0x08, 0x1a, 0x4b, 0x29, 0x76, 0xa4, 0xb3, + 0xb5, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x54, 0x81, 0xea, 0x3e, 0xbc, 0x72, 0x85, 0x0b, 0x39, 0x84, + 0x72, 0x1c, 0x61, 0xd8, 0xf6, 0x4e, 0x7c, 0x55, 0xdb, 0x1b, 0xd9, 0xb1, 0x8f, 0x94, 0xb5, 0xb5, + 0xa9, 0x82, 0x95, 0x2f, 0x24, 0x74, 0x82, 0xa4, 0xff, 0x9c, 0x87, 0x1b, 0x87, 0xfe, 0x00, 0x3d, + 0x8a, 0xdf, 0xc5, 0x18, 0xf1, 0x17, 0x40, 0xe1, 0x23, 0x28, 0x46, 0x01, 0xda, 0x8a, 0x40, 0x23, + 0xbb, 0x88, 0xd9, 0xdc, 0x3a, 0x01, 0xda, 0xd3, 0x49, 0x14, 0x5f, 0x54, 0x22, 0x91, 0x2f, 0x27, + 0x4d, 0x29, 0x2c, 0x65, 0x7c, 0x1d, 0x66, 0x76, 0x3f, 0xfe, 0xd5, 0x60, 0x73, 0x31, 0x05, 0xf2, + 0x16, 0x54, 0x58, 0xdc, 0x73, 0xc4, 0xe3, 0xbb, 0x18, 0xd5, 0xf5, 0x64, 0xdc, 0xa8, 0xec, 0x5f, + 0x08, 0xe9, 0x54, 0x4f, 0x3e, 0x82, 0x2d, 0x7c, 0x12, 0x38, 0xa1, 0x8c, 0xde, 0x41, 0xdb, 0xf7, + 0x7a, 0x91, 0x7c, 0x33, 0x05, 0xeb, 0x56, 0x32, 0x6e, 0x6c, 0xb5, 0x16, 0x95, 0x74, 0xd9, 0x9e, + 0x78, 0xb0, 0xd1, 0x9d, 0x7b, 0xfa, 0xaa, 0xd0, 0x66, 0x76, 0xa1, 0x97, 0xad, 0x0b, 0x8b, 0x24, + 0xe3, 0xc6, 0xc6, 0xbc, 0x86, 0x2e, 0xa0, 0xeb, 0xbf, 0x6a, 0x40, 0x96, 0x59, 0x22, 0x77, 0xa0, + 0xc4, 0x85, 0x54, 0xad, 0x9a, 0x75, 0x45, 0x5a, 0x29, 0x35, 0x4d, 0x75, 0x64, 0x04, 0x37, 0xa7, + 0x05, 0x1c, 0x3a, 0x2e, 0x46, 0x9c, 0xb9, 0x81, 0xea, 0xf6, 0x9b, 0xab, 0xcd, 0x92, 0x70, 0xb3, + 0x5e, 0x55, 0xf0, 0x37, 0x5b, 0xcb, 0x70, 0xf4, 0xb2, 0x18, 0xfa, 0x4f, 0x79, 0xa8, 0xaa, 0xb4, + 0x5f, 0xd0, 0x3a, 0x78, 0x38, 0x37, 0xcb, 0x77, 0x57, 0x9a, 0x3b, 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, + 0x2f, 0x16, 0x46, 0xd9, 0x5c, 0x1d, 0x32, 0x7b, 0x92, 0x6d, 0x78, 0x69, 0x21, 0xfe, 0x6a, 0xed, + 0x9c, 0x1b, 0xf6, 0x7c, 0xf6, 0xb0, 0xeb, 0xff, 0x68, 0xb0, 0xb5, 0x94, 0x12, 0xf9, 0x00, 0xd6, + 0x67, 0x32, 0xc7, 0xf4, 0x52, 0x95, 0xad, 0x5b, 0x2a, 0xde, 0xfa, 0xfe, 0xac, 0x92, 0xce, 0xdb, + 0x92, 0x4f, 0xa0, 0x28, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, 0xa1, 0x56, 0x48, 0xa8, 0x44, + 0x98, 0xaf, 0xa4, 0x78, 0xcd, 0xb3, 0xbd, 0x03, 0x25, 0x0c, 0x43, 0x3f, 0x54, 0xf7, 0x6f, 0xc2, + 0x4d, 0x4b, 0x08, 0x69, 0xaa, 0xd3, 0x7f, 0xcb, 0xc3, 0x64, 0xa7, 0x92, 0xb7, 0xd3, 0xfd, 0x2c, + 0x8f, 0x66, 0x4a, 0xe8, 0xdc, 0xde, 0x15, 0x72, 0x3a, 0xb1, 0x20, 0xaf, 0x41, 0x21, 0x76, 0x7a, + 0xea, 0x16, 0x57, 0x67, 0x8e, 0x27, 0x15, 0x72, 0xa2, 0xc3, 0x5a, 0x3f, 0xf4, 0xe3, 0x40, 0x8c, + 0x81, 0x48, 0x14, 0x44, 0x47, 0x3f, 0x96, 0x12, 0xaa, 0x34, 0xe4, 0x18, 0x4a, 0x28, 0x6e, 0xa7, + 0xac, 0xa5, 0xda, 0xdc, 0x5b, 0x8d, 0x1a, 0x43, 0xde, 0xdb, 0x96, 0xc7, 0xc3, 0xd1, 0x4c, 0x55, + 0x42, 0x46, 0x53, 0xb8, 0xed, 0xae, 0xba, 0xc9, 0xd2, 0x86, 0x6c, 0x42, 0x61, 0x80, 0xa3, 0xb4, + 0x22, 0x2a, 0x7e, 0x92, 0x0f, 0xa1, 0x34, 0x14, 0xe7, 0x5a, 0xb5, 0x64, 0x37, 0x3b, 0xee, 0xf4, + 
0xbc, 0xd3, 0xd4, 0xed, 0x41, 0xfe, 0xbe, 0x66, 0x59, 0x67, 0xe7, 0xf5, 0xdc, 0xb3, 0xf3, 0x7a, + 0xee, 0xf9, 0x79, 0x3d, 0xf7, 0x34, 0xa9, 0x6b, 0x67, 0x49, 0x5d, 0x7b, 0x96, 0xd4, 0xb5, 0xe7, + 0x49, 0x5d, 0xfb, 0x2b, 0xa9, 0x6b, 0x3f, 0xfc, 0x5d, 0xcf, 0x7d, 0x75, 0x3b, 0xeb, 0xcf, 0xe0, + 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x9a, 0x38, 0x17, 0x44, 0x0a, 0x00, 0x00, } func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { @@ -451,6 +512,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -850,6 +987,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenRequest) Size() (n int) { if m == nil { return 0 @@ -999,6 +1160,27 @@ func (this *BoundObjectReference) String() string { }, "") return s } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + 
`UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenRequest) String() string { if this == nil { return "nil" @@ -1361,6 +1543,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index d21584209..1632070c8 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -56,6 +56,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional UserInfo userInfo = 1; +} + // TokenRequest requests a token for a given service account. message TokenRequest { // Standard object's metadata. @@ -74,7 +94,7 @@ message TokenRequest { // TokenRequestSpec contains client provided parameters of a token request. message TokenRequestSpec { // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of + // token must identify themself with an identifier in the list of // audiences of the token, and otherwise should reject the token. 
A // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go index c522e4a46..6a32b5926 100644 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -46,6 +46,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, &TokenRequest{}, + &SelfSubjectReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 7ba247d63..b498007c0 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -151,7 +151,7 @@ type TokenRequest struct { // TokenRequestSpec contains client provided parameters of a token request. type TokenRequestSpec struct { // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of + // token must identify themself with an identifier in the list of // audiences of the token, and otherwise should reject the token. A // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of @@ -197,3 +197,28 @@ type BoundObjectReference struct { // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index f9a88a3df..ebfd4852c 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_BoundObjectReference = map[string]string{ @@ -39,6 +39,25 @@ func (BoundObjectReference) SwaggerDoc() map[string]string { return map_BoundObjectReference } +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenRequest = map[string]string{ "": "TokenRequest requests a token for a given service account.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -52,7 +71,7 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", - "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index 2af533191..369c89b86 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -61,6 +61,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/vendor/k8s.io/api/authentication/v1alpha1/doc.go new file mode 100644 index 000000000..eb32def90 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +package v1alpha1 // import "k8s.io/api/authentication/v1alpha1" diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go new file mode 100644 index 000000000..ea274ac07 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go @@ -0,0 +1,567 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_05a77aeb710b43c2, []int{0} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_05a77aeb710b43c2, []int{1} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReviewStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1alpha1/generated.proto", fileDescriptor_05a77aeb710b43c2) +} + +var fileDescriptor_05a77aeb710b43c2 = []byte{ + // 384 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xbd, 0x6e, 0xdb, 0x30, + 0x14, 0x85, 0xc5, 0x0e, 0x86, 0xa1, 0x02, 0x45, 0xab, 0xa5, 0x86, 0x07, 0xba, 0xd0, 0x50, 0x74, + 0x68, 0xc9, 0xba, 0x28, 0x8a, 0x02, 0xdd, 0x34, 0x35, 0x08, 0x82, 0x00, 0x72, 0xb2, 0x64, 0x0a, + 0x25, 0x5f, 0x4b, 0x8c, 0x2c, 0x52, 0x10, 0x49, 0x05, 0xd9, 0xf2, 0x08, 0x79, 0x2c, 0x8f, 0x1e, + 0x8d, 0x0c, 0x46, 0xac, 0xbc, 0x48, 0x20, 0x59, 0xb6, 0x11, 0x3b, 0xb6, 0x37, 0xde, 0xc3, 0xfb, + 0x9d, 0x7b, 
0xf8, 0x63, 0x9f, 0x26, 0x7f, 0x15, 0xe1, 0x92, 0x26, 0x26, 0x80, 0x5c, 0x80, 0x06, + 0x45, 0x0b, 0x10, 0x43, 0x99, 0xd3, 0x66, 0x83, 0x65, 0x9c, 0x32, 0xa3, 0x63, 0x10, 0x9a, 0x87, + 0x4c, 0x73, 0x29, 0x68, 0xd1, 0x67, 0xe3, 0x2c, 0x66, 0x7d, 0x1a, 0x81, 0x80, 0x9c, 0x69, 0x18, + 0x92, 0x2c, 0x97, 0x5a, 0x3a, 0xee, 0x92, 0x21, 0x2c, 0xe3, 0xe4, 0x35, 0x43, 0x56, 0x4c, 0xf7, + 0x47, 0xc4, 0x75, 0x6c, 0x02, 0x12, 0xca, 0x94, 0x46, 0x32, 0x92, 0xb4, 0x46, 0x03, 0x33, 0xaa, + 0xab, 0xba, 0xa8, 0x57, 0x4b, 0xcb, 0xee, 0xf7, 0x43, 0x31, 0xb6, 0x03, 0x74, 0x7f, 0x6f, 0xba, + 0x53, 0x16, 0xc6, 0x5c, 0x40, 0x7e, 0x47, 0xb3, 0x24, 0xaa, 0x04, 0x45, 0x53, 0xd0, 0xec, 0x2d, + 0x8a, 0xee, 0xa3, 0x72, 0x23, 0x34, 0x4f, 0x61, 0x07, 0xf8, 0x73, 0x0c, 0x50, 0x61, 0x0c, 0x29, + 0xdb, 0xe6, 0xdc, 0x47, 0x64, 0x7f, 0x1a, 0xc0, 0x78, 0x34, 0x30, 0xc1, 0x0d, 0x84, 0xda, 0x87, + 0x82, 0xc3, 0xad, 0x73, 0x6d, 0xb7, 0xab, 0x64, 0x43, 0xa6, 0x59, 0x07, 0x7d, 0x41, 0xdf, 0xde, + 0xff, 0xfa, 0x49, 0x36, 0x17, 0xb9, 0x1e, 0x40, 0xb2, 0x24, 0xaa, 0x04, 0x45, 0xaa, 0x6e, 0x52, + 0xf4, 0xc9, 0x79, 0xed, 0x72, 0x06, 0x9a, 0x79, 0xce, 0x64, 0xde, 0xb3, 0xca, 0x79, 0xcf, 0xde, + 0x68, 0xfe, 0xda, 0xd5, 0x09, 0xed, 0x96, 0xd2, 0x4c, 0x1b, 0xd5, 0x79, 0x57, 0xfb, 0xff, 0x23, + 0xc7, 0x1f, 0x8a, 0xec, 0x04, 0x1d, 0xd4, 0x16, 0xde, 0x87, 0x66, 0x54, 0x6b, 0x59, 0xfb, 0x8d, + 0xb5, 0x2b, 0xed, 0xcf, 0x7b, 0x10, 0xe7, 0xc2, 0x6e, 0x1b, 0x05, 0xf9, 0x89, 0x18, 0xc9, 0xe6, + 0x84, 0x5f, 0x0f, 0x26, 0x20, 0x97, 0x4d, 0xb7, 0xf7, 0xb1, 0x19, 0xd6, 0x5e, 0x29, 0xfe, 0xda, + 0xc9, 0xfb, 0x3f, 0x59, 0x60, 0x6b, 0xba, 0xc0, 0xd6, 0x6c, 0x81, 0xad, 0xfb, 0x12, 0xa3, 0x49, + 0x89, 0xd1, 0xb4, 0xc4, 0x68, 0x56, 0x62, 0xf4, 0x54, 0x62, 0xf4, 0xf0, 0x8c, 0xad, 0x2b, 0xf7, + 0xf8, 0x3f, 0x7e, 0x09, 0x00, 0x00, 0xff, 0xff, 0xec, 0xf9, 0xa3, 0xcd, 0x05, 0x03, 0x00, 0x00, +} + +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + 
offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", 
wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto new file mode 100644 index 000000000..51d925244 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -0,0 +1,51 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.authentication.v1alpha1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/authentication/v1alpha1"; + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + diff --git a/vendor/k8s.io/api/authentication/v1alpha1/register.go b/vendor/k8s.io/api/authentication/v1alpha1/register.go new file mode 100644 index 000000000..4f93928d9 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types.go b/vendor/k8s.io/api/authentication/v1alpha1/types.go new file mode 100644 index 000000000..1ee3612fb --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/types.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. 
+ // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..1ffcc99e7 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,49 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9b146a847 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..62a70a781 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
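Aside from the patch itself: the authentication.k8s.io/v1alpha1 SelfSubjectReview vendored above is cluster-scoped and create-only (+genclient:onlyVerbs=create) — a client posts an empty object and the kube-apiserver fills status.userInfo with its view of the caller. As a rough sketch of how the vendored type could be exercised, assuming a client-go release matching this API level that exposes the generated AuthenticationV1alpha1().SelfSubjectReviews() accessor and a kubeconfig in the default location (none of which this patch itself wires up):

	package main

	import (
		"context"
		"fmt"

		authnv1alpha1 "k8s.io/api/authentication/v1alpha1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// Build a rest.Config from the default kubeconfig (assumption: out-of-cluster use).
		cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		cs, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			panic(err)
		}
		// SelfSubjectReview is create-only: send an empty object, the server fills Status.
		ssr, err := cs.AuthenticationV1alpha1().SelfSubjectReviews().Create(
			context.TODO(), &authnv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("username=%s groups=%v\n", ssr.Status.UserInfo.Username, ssr.Status.UserInfo.Groups)
	}

The v1beta1 variant added further down behaves the same way, but the API group version must be enabled on the server (new alpha/beta APIs are off by default on recent clusters), so treat this purely as an illustration of the vendored types.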
+func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 1978dcf6a..7f1d5ca6c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -72,10 +72,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_77c9b20d3ad27844, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_77c9b20d3ad27844, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo 
func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_77c9b20d3ad27844, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_77c9b20d3ad27844, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,6 +242,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus") proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") @@ -198,49 +256,53 @@ func init() { } var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4e, 0x13, 0x5f, - 0x14, 0x9e, 0xe9, 0x1f, 0xd2, 0xde, 0xfe, 0xfa, 0x13, 0x6f, 0x62, 0xd2, 0x34, 0x71, 0x0a, 0x75, - 0x43, 0x82, 0xdc, 0x11, 0x42, 0x90, 0xe0, 0x8a, 0x51, 0x42, 0x30, 0x21, 0x26, 0x57, 0x70, 0xa1, - 0x2e, 0xbc, 0x9d, 0x1e, 0xa6, 0x63, 0x9d, 0x3f, 0xb9, 0x73, 0xa7, 0xca, 0x8e, 0x47, 0x70, 0xe9, - 0xd2, 0xc4, 0x27, 0x71, 0xc7, 0x92, 0x25, 0x0b, 0xd3, 0xc8, 0xf8, 0x04, 0xbe, 0x81, 0xb9, 0x77, - 0x2e, 0x4c, 0x81, 0x68, 0x61, 0x37, 0xf7, 0x3b, 0xe7, 0xfb, 0xce, 0x39, 0xdf, 0xe9, 0x29, 0x7a, - 0x3e, 0x5c, 0x4f, 0x88, 0x1f, 0xd9, 0xc3, 0xb4, 0x07, 0x3c, 0x04, 0x01, 0x89, 0x3d, 0x82, 0xb0, - 0x1f, 0x71, 0x5b, 0x07, 0x58, 0xec, 0xdb, 0x2c, 0x15, 0x03, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, - 0xa1, 0x3d, 0x5a, 0xee, 0x81, 0x60, 0xcb, 0xb6, 0x07, 0x21, 0x70, 0x26, 0xa0, 0x4f, 0x62, 0x1e, - 0x89, 0x08, 0xcf, 0xe7, 0x14, 0xc2, 0x62, 0x9f, 0x5c, 0xa6, 0x10, 0x4d, 0x69, 0x2f, 0x79, 0xbe, - 0x18, 0xa4, 0x3d, 0xe2, 0x46, 0x81, 0xed, 0x45, 0x5e, 0x64, 0x2b, 0x66, 0x2f, 0x3d, 0x50, 0x2f, - 0xf5, 0x50, 0x5f, 0xb9, 0x62, 0x7b, 0xb5, 0x68, 0x22, 0x60, 0xee, 0xc0, 0x0f, 0x81, 0x1f, 0xda, - 0xf1, 0xd0, 0x93, 0x40, 0x62, 0x07, 0x20, 0x98, 0x3d, 0xba, 0xd6, 0x47, 0xdb, 0xfe, 0x1b, 0x8b, - 0xa7, 0xa1, 0xf0, 0x03, 0xb8, 0x46, 0x58, 0x9b, 0x46, 0x48, 0xdc, 0x01, 0x04, 0xec, 0x2a, 0xaf, - 0xfb, 0x18, 0xa1, 0xad, 0x4f, 0x82, 0xb3, 0x57, 0xec, 0x43, 0x0a, 0xb8, 0x83, 0xaa, 0xbe, 0x80, - 0x20, 0x69, 0x99, 0x73, 0xe5, 0x85, 0xba, 0x53, 0xcf, 0xc6, 0x9d, 0xea, 0x8e, 0x04, 0x68, 0x8e, - 0x6f, 0xd4, 0xbe, 0x7c, 0xed, 0x18, 0x47, 0x3f, 0xe6, 0x8c, 0xee, 0xb7, 0x12, 0x6a, 0xec, 0x45, - 0x43, 0x08, 0x29, 0x8c, 0x7c, 0xf8, 0x88, 0xdf, 0xa1, 0x9a, 0x1c, 0xa6, 0xcf, 0x04, 0x6b, 0x99, - 0x73, 0xe6, 0x42, 0x63, 0xe5, 0x11, 0x29, 0xcc, 0xbc, 0xe8, 0x89, 0xc4, 0x43, 0x4f, 0x02, 0x09, - 0x91, 0xd9, 0x64, 0xb4, 0x4c, 0x5e, 0xf4, 0xde, 0x83, 0x2b, 0x76, 0x41, 0x30, 0x07, 0x1f, 0x8f, - 0x3b, 0x46, 0x36, 0xee, 0xa0, 0x02, 
0xa3, 0x17, 0xaa, 0x78, 0x0f, 0x55, 0x92, 0x18, 0xdc, 0x56, - 0x49, 0xa9, 0xaf, 0x90, 0xa9, 0xab, 0x22, 0x13, 0xfd, 0xbd, 0x8c, 0xc1, 0x75, 0xfe, 0xd3, 0xfa, - 0x15, 0xf9, 0xa2, 0x4a, 0x0d, 0xbf, 0x45, 0x33, 0x89, 0x60, 0x22, 0x4d, 0x5a, 0x65, 0xa5, 0xbb, - 0x7a, 0x4b, 0x5d, 0xc5, 0x75, 0xfe, 0xd7, 0xca, 0x33, 0xf9, 0x9b, 0x6a, 0xcd, 0xae, 0x8b, 0xee, - 0x5c, 0x69, 0x02, 0x3f, 0x40, 0x55, 0x21, 0x21, 0xe5, 0x52, 0xdd, 0x69, 0x6a, 0x66, 0x35, 0xcf, - 0xcb, 0x63, 0x78, 0x11, 0xd5, 0x59, 0xda, 0xf7, 0x21, 0x74, 0x21, 0x69, 0x95, 0xd4, 0x32, 0x9a, - 0xd9, 0xb8, 0x53, 0xdf, 0x3c, 0x07, 0x69, 0x11, 0xef, 0xfe, 0x36, 0xd1, 0xdd, 0x6b, 0x2d, 0xe1, - 0x27, 0xa8, 0x39, 0xd1, 0x3e, 0xf4, 0x55, 0xbd, 0x9a, 0x73, 0x4f, 0xd7, 0x6b, 0x6e, 0x4e, 0x06, - 0xe9, 0xe5, 0x5c, 0xbc, 0x8b, 0x2a, 0x69, 0x02, 0x5c, 0x7b, 0xbd, 0x78, 0x03, 0x4f, 0xf6, 0x13, - 0xe0, 0x3b, 0xe1, 0x41, 0x54, 0x98, 0x2c, 0x11, 0xaa, 0x64, 0x2e, 0x8f, 0x53, 0xf9, 0xf7, 0x38, - 0xd2, 0x20, 0xe0, 0x3c, 0xe2, 0x6a, 0x21, 0x13, 0x06, 0x6d, 0x49, 0x90, 0xe6, 0xb1, 0xee, 0xf7, - 0x12, 0xaa, 0x9d, 0x97, 0xc4, 0x0f, 0x51, 0x4d, 0x96, 0x09, 0x59, 0x00, 0xda, 0xd5, 0x59, 0x4d, - 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x3e, 0x2a, 0xa7, 0x7e, 0x5f, 0x8d, 0x56, 0x77, 0x1a, - 0x3a, 0xb1, 0xbc, 0xbf, 0xf3, 0x8c, 0x4a, 0x1c, 0x77, 0xd1, 0x8c, 0xc7, 0xa3, 0x34, 0x96, 0x3f, - 0x08, 0xd9, 0x28, 0x92, 0x6b, 0xdd, 0x56, 0x08, 0xd5, 0x11, 0xfc, 0x06, 0x55, 0x41, 0x5e, 0x8d, - 0x9a, 0xa5, 0xb1, 0xb2, 0x76, 0x0b, 0x7f, 0x88, 0x3a, 0xb7, 0xad, 0x50, 0xf0, 0xc3, 0x89, 0xd1, - 0x24, 0x46, 0x73, 0xcd, 0xb6, 0xa7, 0x4f, 0x52, 0xe5, 0xe0, 0x59, 0x54, 0x1e, 0xc2, 0x61, 0x3e, - 0x16, 0x95, 0x9f, 0xf8, 0x29, 0xaa, 0x8e, 0xe4, 0xb5, 0xea, 0xe5, 0x2c, 0xdd, 0xa0, 0x78, 0x71, - 0xe2, 0x34, 0xe7, 0x6e, 0x94, 0xd6, 0x4d, 0x67, 0xfb, 0xf8, 0xcc, 0x32, 0x4e, 0xce, 0x2c, 0xe3, - 0xf4, 0xcc, 0x32, 0x8e, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x24, 0xb3, 0xcc, 0xd3, 0xcc, 0x32, - 0x7f, 0x66, 0x96, 0xf9, 0xf9, 0x97, 0x65, 0xbc, 0x9e, 0x9f, 0xfa, 0x2f, 0xfa, 0x27, 0x00, 0x00, - 0xff, 0xff, 0xb8, 0x72, 0x2c, 0x2c, 0x82, 0x05, 0x00, 0x00, + // 725 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 0xf6, 0x0f, 0x69, 0xa7, 0x56, 0x61, 0x12, 0x23, 0x69, 0xe2, 0x16, 0x6a, 0x62, 0x48, + 0x80, 0x59, 0x21, 0x04, 0x09, 0x9e, 0x58, 0x25, 0x04, 0x13, 0x62, 0x32, 0x05, 0x0f, 0xea, 0xc1, + 0xe9, 0xf6, 0xb1, 0x5d, 0x4b, 0x77, 0x37, 0xbb, 0xb3, 0x55, 0x6e, 0x7c, 0x04, 0x8f, 0x1e, 0x4d, + 0xfc, 0x24, 0xde, 0x38, 0x72, 0xc4, 0xc4, 0x34, 0xb2, 0x7e, 0x02, 0xbf, 0x81, 0x99, 0xd9, 0x61, + 0xdb, 0x82, 0x14, 0xb8, 0x78, 0xdb, 0xf9, 0xcd, 0xfb, 0xfd, 0xde, 0x7b, 0xbf, 0xf7, 0x32, 0x8b, + 0x5e, 0x76, 0xd6, 0x42, 0xe2, 0x78, 0x46, 0x27, 0x6a, 0x42, 0xe0, 0x02, 0x87, 0xd0, 0xe8, 0x81, + 0xdb, 0xf2, 0x02, 0x43, 0x5d, 0x30, 0xdf, 0x31, 0x58, 0xc4, 0xdb, 0xe0, 0x72, 0xc7, 0x62, 0xdc, + 0xf1, 0x5c, 0xa3, 0xb7, 0xd4, 0x04, 0xce, 0x96, 0x0c, 0x1b, 0x5c, 0x08, 0x18, 0x87, 0x16, 0xf1, + 0x03, 0x8f, 0x7b, 0x78, 0x36, 0xa1, 0x10, 0xe6, 0x3b, 0x64, 0x94, 0x42, 0x14, 0xa5, 0xba, 0x68, + 0x3b, 0xbc, 0x1d, 0x35, 0x89, 0xe5, 0x75, 0x0d, 0xdb, 0xb3, 0x3d, 0x43, 0x32, 0x9b, 0xd1, 0xbe, + 0x3c, 0xc9, 0x83, 0xfc, 0x4a, 0x14, 0xab, 0x0b, 0xe3, 0x8a, 0xb8, 0x98, 0xbf, 0xba, 0x32, 0x88, + 0xee, 0x32, 0xab, 0xed, 0xb8, 0x10, 0x1c, 0x1a, 0x7e, 0xc7, 0x16, 0x40, 0x68, 0x74, 0x81, 0xb3, + 0x7f, 0xb1, 0x8c, 0xab, 0x58, 0x41, 0xe4, 0x72, 0xa7, 0x0b, 0x97, 0x08, 0xab, 0xd7, 0x11, 0x42, + 0xab, 0x0d, 0x5d, 0x76, 0x91, 0x57, 0x7f, 0x8a, 
0xd0, 0xe6, 0x27, 0x1e, 0xb0, 0xd7, 0xec, 0x20, + 0x02, 0x5c, 0x43, 0x05, 0x87, 0x43, 0x37, 0x9c, 0xd6, 0x66, 0x72, 0x73, 0x25, 0xb3, 0x14, 0xf7, + 0x6b, 0x85, 0x6d, 0x01, 0xd0, 0x04, 0x5f, 0x2f, 0x7e, 0xf9, 0x5a, 0xcb, 0x1c, 0xfd, 0x9c, 0xc9, + 0xd4, 0x7f, 0x68, 0x68, 0xaa, 0x01, 0x07, 0xfb, 0x8d, 0xa8, 0xf9, 0x01, 0x2c, 0x4e, 0xa1, 0xe7, + 0xc0, 0x47, 0xfc, 0x1e, 0x15, 0x45, 0x4b, 0x2d, 0xc6, 0xd9, 0xb4, 0x36, 0xa3, 0xcd, 0x95, 0x97, + 0x9f, 0x90, 0xc1, 0x00, 0xd2, 0xca, 0x88, 0xdf, 0xb1, 0x05, 0x10, 0x12, 0x11, 0x4d, 0x7a, 0x4b, + 0xe4, 0x95, 0x54, 0xd9, 0x01, 0xce, 0x4c, 0x7c, 0xdc, 0xaf, 0x65, 0xe2, 0x7e, 0x0d, 0x0d, 0x30, + 0x9a, 0xaa, 0xe2, 0x26, 0x9a, 0x08, 0x39, 0xe3, 0x51, 0x38, 0x9d, 0x95, 0xfa, 0xeb, 0xe4, 0xda, + 0x01, 0x93, 0x4b, 0x75, 0x36, 0xa4, 0x82, 0x79, 0x57, 0x65, 0x9a, 0x48, 0xce, 0x54, 0x29, 0xd7, + 0x3d, 0xf4, 0xe0, 0x0a, 0x0a, 0xde, 0x45, 0xc5, 0x28, 0x84, 0x60, 0xdb, 0xdd, 0xf7, 0x54, 0x83, + 0x8f, 0xc7, 0x16, 0x40, 0xf6, 0x54, 0xb4, 0x39, 0xa9, 0x92, 0x15, 0xcf, 0x11, 0x9a, 0x2a, 0xd5, + 0xbf, 0x65, 0x51, 0x79, 0xd7, 0xeb, 0x80, 0xfb, 0xdf, 0x6c, 0xdc, 0x45, 0xf9, 0xd0, 0x07, 0x4b, + 0x99, 0xb8, 0x7c, 0x03, 0x13, 0x87, 0xea, 0x6b, 0xf8, 0x60, 0x99, 0x77, 0x94, 0x7e, 0x5e, 0x9c, + 0xa8, 0x54, 0xc3, 0xef, 0xd2, 0xe1, 0xe4, 0xa4, 0xee, 0xca, 0x2d, 0x75, 0xc7, 0x8f, 0xc5, 0x42, + 0xf7, 0x2e, 0x14, 0x81, 0x1f, 0xa1, 0x02, 0x17, 0x90, 0x74, 0xa9, 0x64, 0x56, 0x14, 0xb3, 0x90, + 0xc4, 0x25, 0x77, 0x78, 0x1e, 0x95, 0x58, 0xd4, 0x72, 0xc0, 0xb5, 0x40, 0x6c, 0x8d, 0xd8, 0xec, + 0x4a, 0xdc, 0xaf, 0x95, 0x36, 0xce, 0x41, 0x3a, 0xb8, 0xaf, 0xff, 0xd1, 0xd0, 0xd4, 0xa5, 0x92, + 0xf0, 0x33, 0x54, 0x19, 0x2a, 0x1f, 0x5a, 0x32, 0x5f, 0xd1, 0xbc, 0xaf, 0xf2, 0x55, 0x36, 0x86, + 0x2f, 0xe9, 0x68, 0x2c, 0xde, 0x41, 0x79, 0x31, 0x69, 0xe5, 0xf5, 0xfc, 0x0d, 0x3c, 0x49, 0x97, + 0x26, 0x35, 0x59, 0x20, 0x54, 0xca, 0x8c, 0xb6, 0x93, 0x1f, 0xdf, 0x8e, 0x30, 0x08, 0x82, 0xc0, + 0x0b, 0xe4, 0x40, 0x86, 0x0c, 0xda, 0x14, 0x20, 0x4d, 0xee, 0xea, 0xdf, 0xb3, 0x28, 0xdd, 0x4a, + 0xbc, 0x90, 0x6c, 0xb8, 0xcb, 0xba, 0xa0, 0x5c, 0x1d, 0xd9, 0x5c, 0x81, 0xd3, 0x34, 0x02, 0x3f, + 0x44, 0xb9, 0xc8, 0x69, 0xc9, 0xd6, 0x4a, 0x66, 0x59, 0x05, 0xe6, 0xf6, 0xb6, 0x5f, 0x50, 0x81, + 0xe3, 0x3a, 0x9a, 0xb0, 0x03, 0x2f, 0xf2, 0xc5, 0x42, 0x88, 0x42, 0x91, 0x18, 0xeb, 0x96, 0x44, + 0xa8, 0xba, 0xc1, 0x6f, 0x51, 0x01, 0xc4, 0x13, 0x24, 0x7b, 0x29, 0x2f, 0xaf, 0xde, 0xc2, 0x1f, + 0x22, 0xdf, 0xae, 0x4d, 0x97, 0x07, 0x87, 0x43, 0xad, 0x09, 0x8c, 0x26, 0x9a, 0x55, 0x5b, 0xbd, + 0x6f, 0x32, 0x06, 0x4f, 0xa2, 0x5c, 0x07, 0x0e, 0x93, 0xb6, 0xa8, 0xf8, 0xc4, 0xcf, 0x51, 0xa1, + 0x27, 0x9e, 0x3e, 0x35, 0x9c, 0xc5, 0x1b, 0x24, 0x1f, 0xbc, 0x97, 0x34, 0xe1, 0xae, 0x67, 0xd7, + 0x34, 0x73, 0xeb, 0xf8, 0x4c, 0xcf, 0x9c, 0x9c, 0xe9, 0x99, 0xd3, 0x33, 0x3d, 0x73, 0x14, 0xeb, + 0xda, 0x71, 0xac, 0x6b, 0x27, 0xb1, 0xae, 0x9d, 0xc6, 0xba, 0xf6, 0x2b, 0xd6, 0xb5, 0xcf, 0xbf, + 0xf5, 0xcc, 0x9b, 0xd9, 0x6b, 0x7f, 0x60, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x19, 0x49, + 0x3f, 0xfd, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -275,6 +337,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -517,6 +655,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenReview) Size() (n int) { if m == nil { return 0 @@ -603,6 +765,27 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenReview) String() string { if this == nil { return "nil" @@ -752,6 +935,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index d1847a02e..53b4635d7 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authentication.v1beta1; +import "k8s.io/api/authentication/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -37,6 +38,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index ed23e50f7..075ee1263 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -44,6 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, &TokenReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 08e1e09b6..5bce82e7c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -113,3 +114,29 @@ type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. 
If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c3..d6644f2cf 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,28 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 059ec1a86..99ffadf7b 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -45,6 +45,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index e448106e4..904796925 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
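The prerelease-lifecycle functions added in this hunk (and in the v1alpha1 file earlier) follow the convention stated in their comments: unless explicit deprecated/removed tags are set, an API is treated as deprecated three minor releases after it was introduced and no longer served three minors after that, hence 1.26 → 1.29 → 1.32 for v1alpha1 and 1.27 → 1.30 → 1.33 for v1beta1. A tiny hypothetical helper, only to make that arithmetic explicit (not part of the vendored code):

	package main

	import "fmt"

	// defaultLifecycle derives the default deprecation and removal minors from the
	// introduction minor, following the "plus three minor" rule in the generated comments.
	func defaultLifecycle(introducedMinor int) (deprecatedMinor, removedMinor int) {
		return introducedMinor + 3, introducedMinor + 6
	}

	func main() {
		dep, rem := defaultLifecycle(27) // v1beta1 SelfSubjectReview, introduced in 1.27
		fmt.Printf("deprecated in 1.%d, removed in 1.%d\n", dep, rem) // 1.30 and 1.33
	}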
func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7a..93229485c 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189e..e0846be7a 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 9820705e7..1dbafd1a5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -87,13 +87,13 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -147,11 +147,11 @@ message HorizontalPodAutoscaler { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; - // current information about the autoscaler. + // status is the current information about the autoscaler. 
// +optional optional HorizontalPodAutoscalerStatus status = 3; } @@ -186,7 +186,7 @@ message HorizontalPodAutoscalerList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; } @@ -204,10 +204,10 @@ message HorizontalPodAutoscalerSpec { // +optional optional int32 minReplicas = 2; - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. optional int32 maxReplicas = 3; - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional optional int32 targetCPUUtilizationPercentage = 4; @@ -215,22 +215,22 @@ message HorizontalPodAutoscalerSpec { // current status of a horizontal pod autoscaler message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional optional int64 observedGeneration = 1; - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. optional int32 desiredReplicas = 4; - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional optional int32 currentCPUUtilizationPercentage = 5; @@ -264,7 +264,7 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -309,7 +309,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). 
Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -464,31 +464,31 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource. message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional optional string selector = 2; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 8e0a46525..450829017 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -25,11 +25,13 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -46,9 +48,11 @@ type HorizontalPodAutoscalerSpec struct { // available. 
// +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` @@ -56,22 +60,22 @@ type HorizontalPodAutoscalerSpec struct { // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` @@ -87,11 +91,11 @@ type HorizontalPodAutoscaler struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current information about the autoscaler. + // status is the current information about the autoscaler. 
// +optional Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -105,7 +109,7 @@ type HorizontalPodAutoscalerList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -118,31 +122,31 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } @@ -194,11 +198,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -206,7 +212,8 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -214,6 +221,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -231,6 +239,7 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` @@ -239,6 +248,7 @@ type ObjectMetricSource struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -252,6 +262,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` @@ -273,11 +284,13 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -295,16 +308,19 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. 
// +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. Container string `json:"container" protobuf:"bytes,5,opt,name=container"` } @@ -315,14 +331,17 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional @@ -341,11 +360,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -353,13 +374,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster @@ -390,15 +413,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -413,6 +440,7 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` @@ -421,6 +449,7 @@ type ObjectMetricStatus struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -432,6 +461,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` @@ -451,6 +481,7 @@ type PodsMetricStatus struct { type ResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -458,6 +489,7 @@ type ResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -473,6 +505,7 @@ type ResourceMetricStatus struct { type ContainerResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -480,11 +513,13 @@ type ContainerResourceMetricStatus struct { // specification. 
// +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget Container string `json:"container" protobuf:"bytes,4,opt,name=container"` } @@ -495,12 +530,14 @@ type ExternalMetricStatus struct { // metricName is the name of a metric used for autoscaling in // metric system. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` // currentValue is the current value of the metric (as a quantity) CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 192dc5f39..37c2b36a5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -53,9 +53,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -89,8 +89,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", + "spec": "spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", + "items": "items is the list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -124,8 +124,8 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "maxReplicas": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -134,11 +134,11 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerStatus = map[string]string{ "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 
70 means that an average pod is using now 70% of its requested CPU.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ var map_MetricSpec = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -165,7 +165,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -246,8 +246,8 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -256,7 +256,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -265,8 +265,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "replicas": "replicas is the actual number of observed instances of the scaled object.", + "selector": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. 
The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index 0595f1eb8..a9e36975f 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -495,7 +495,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. 
message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 9931f6146..c12a83df1 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -59,9 +59,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -83,11 +85,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -105,11 +109,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -117,6 +123,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -125,6 +132,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. 
// +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -144,6 +152,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -171,7 +180,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -179,10 +188,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value Max is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +listType=atomic @@ -203,12 +214,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -249,8 +262,10 @@ const ( type ObjectMetricSource struct { // describedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +277,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +292,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +307,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +321,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +330,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +342,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. 
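For orientation, a minimal Go sketch of how the autoscaling/v2 types touched in the hunks above fit together when targeting a single container's CPU (the feature the comments note is gated behind the HPAContainerMetrics flag). The field and constant names come from the vendored package; the package aliases, the container name "app", and the 80% utilization target are illustrative assumptions, not taken from this patch.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Assumed target: 80% of the container's requested CPU.
	averageUtilization := int32(80)

	// ContainerResourceMetricSource ties a resource metric (CPU here) to one named
	// container in each pod of the scale target, as described in the comments above.
	spec := autoscalingv2.MetricSpec{
		Type: autoscalingv2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
			Name:      corev1.ResourceCPU,
			Container: "app", // assumed container name
			Target: autoscalingv2.MetricTarget{
				Type:               autoscalingv2.UtilizationMetricType,
				AverageUtilization: &averageUtilization,
			},
		},
	}

	fmt.Printf("metric spec: %+v\n", spec)
}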
@@ -405,15 +429,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -432,11 +460,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -444,6 +474,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -451,6 +482,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -465,8 +497,10 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` } @@ -476,6 +510,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -486,8 +521,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. 
type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -498,11 +534,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -511,6 +549,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -520,10 +559,12 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 05355a552..1941b1ef5 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -288,7 +288,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index ebd3b31fa..6b3d41521 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -86,10 +86,10 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; // API version of the referent diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 22bb7699e..842284072 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -24,9 +24,9 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 08a54621f..d656ee416 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -53,8 +53,8 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "apiVersion": "API version of the referent", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 920031b1a..5b2fe9442 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -361,7 +361,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -411,7 +411,7 @@ message MetricValueStatus { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; - // currentAverageUtilization is the current value of the average of the + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional @@ -485,7 +485,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 10bdec5b9..b0b7681c0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -62,9 +62,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -85,11 +87,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -107,11 +111,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -119,6 +125,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -127,6 +134,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster @@ -146,6 +154,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -173,7 +182,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -181,10 +190,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional @@ -204,12 +215,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -251,6 +264,7 @@ type ObjectMetricSource struct { DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +276,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +291,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +306,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +320,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +329,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +341,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. 
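Likewise, a small sketch of the v2beta2 scaling-behavior types documented in the preceding hunks. Only the 300-second scale-down stabilization window mirrors the documented default; the Min select policy and the 10%-per-60-seconds policy are illustrative assumptions.

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	// Documented default for scale down: a 300-second stabilization window.
	stabilizationWindowSeconds := int32(300)
	selectPolicy := autoscalingv2beta2.MinPolicySelect

	behavior := autoscalingv2beta2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2beta2.HPAScalingRules{
			StabilizationWindowSeconds: &stabilizationWindowSeconds,
			SelectPolicy:               &selectPolicy,
			Policies: []autoscalingv2beta2.HPAScalingPolicy{
				{
					// Assumed policy: remove at most 10% of pods per 60-second period
					// (PeriodSeconds must stay within the documented 1..1800 range).
					Type:          autoscalingv2beta2.PercentScalingPolicy,
					Value:         10,
					PeriodSeconds: 60,
				},
			},
		},
	}

	fmt.Printf("scaling behavior: %+v\n", behavior)
}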
@@ -399,15 +422,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -426,6 +453,7 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. @@ -438,13 +466,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -459,6 +489,7 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` @@ -470,6 +501,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -480,8 +512,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -492,11 +525,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -505,6 +540,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -514,11 +550,13 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the + + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index e3ea3002b..4af7d0ec0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -203,7 +203,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -227,7 +227,7 @@ var map_MetricValueStatus = map[string]string{ "": "MetricValueStatus holds the current value for a metric", "value": "value is the current value of the metric (as a quantity).", "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", } func (MetricValueStatus) SwaggerDoc() map[string]string { @@ -286,7 +286,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 483406d84..59a7482a0 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -328,10 +328,126 @@ func (m *JobTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo +func (m *PodFailurePolicy) Reset() { *m = PodFailurePolicy{} } +func (*PodFailurePolicy) ProtoMessage() {} +func (*PodFailurePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{10} +} +func (m *PodFailurePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicy.Merge(m, src) +} +func (m *PodFailurePolicy) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicy proto.InternalMessageInfo + +func (m *PodFailurePolicyOnExitCodesRequirement) Reset() { + *m = PodFailurePolicyOnExitCodesRequirement{} +} +func (*PodFailurePolicyOnExitCodesRequirement) ProtoMessage() {} +func (*PodFailurePolicyOnExitCodesRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{11} +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + 
} + return b[:n], nil +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.Merge(m, src) +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement proto.InternalMessageInfo + +func (m *PodFailurePolicyOnPodConditionsPattern) Reset() { + *m = PodFailurePolicyOnPodConditionsPattern{} +} +func (*PodFailurePolicyOnPodConditionsPattern) ProtoMessage() {} +func (*PodFailurePolicyOnPodConditionsPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{12} +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.Merge(m, src) +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern proto.InternalMessageInfo + +func (m *PodFailurePolicyRule) Reset() { *m = PodFailurePolicyRule{} } +func (*PodFailurePolicyRule) ProtoMessage() {} +func (*PodFailurePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{13} +} +func (m *PodFailurePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyRule.Merge(m, src) +} +func (m *PodFailurePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyRule proto.InternalMessageInfo + func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} } func (*UncountedTerminatedPods) ProtoMessage() {} func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{10} + return fileDescriptor_3b52da57c93de713, []int{14} } func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -367,6 +483,10 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1.JobTemplateSpec") + proto.RegisterType((*PodFailurePolicy)(nil), "k8s.io.api.batch.v1.PodFailurePolicy") + proto.RegisterType((*PodFailurePolicyOnExitCodesRequirement)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement") + proto.RegisterType((*PodFailurePolicyOnPodConditionsPattern)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern") + 
proto.RegisterType((*PodFailurePolicyRule)(nil), "k8s.io.api.batch.v1.PodFailurePolicyRule") proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods") } @@ -375,97 +495,120 @@ func init() { } var fileDescriptor_3b52da57c93de713 = []byte{ - // 1431 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x41, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x71, 0x6c, 0x8f, 0x93, 0xd4, 0x9d, 0xfe, 0xdb, 0xfa, 0x6f, 0x2a, 0x6f, 0x6a, - 0x0a, 0x0a, 0xa8, 0xac, 0x49, 0x88, 0x10, 0x20, 0x40, 0xca, 0xa6, 0x2a, 0x34, 0x38, 0x6a, 0x18, - 0x3b, 0x42, 0x2a, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xac, 0x77, 0xac, 0x9d, 0x71, 0x84, 0x6f, - 0x48, 0x7c, 0x01, 0xf8, 0x12, 0x1c, 0x11, 0x12, 0x9c, 0x39, 0xa2, 0x1e, 0x2b, 0x4e, 0x3d, 0xad, - 0xe8, 0xf2, 0x01, 0xb8, 0x87, 0x0b, 0x9a, 0xd9, 0xf1, 0xee, 0xda, 0xde, 0x0d, 0x49, 0x0f, 0x15, - 0xb7, 0xcc, 0x9b, 0xdf, 0xfb, 0xcd, 0xdb, 0x79, 0xbf, 0x79, 0xef, 0x39, 0xe0, 0xfd, 0xe3, 0x77, - 0xa8, 0x66, 0x93, 0xe6, 0xf1, 0xa8, 0x8b, 0x3d, 0x17, 0x33, 0x4c, 0x9b, 0x27, 0xd8, 0xb5, 0x88, - 0xd7, 0x94, 0x1b, 0xc6, 0xd0, 0x6e, 0x76, 0x0d, 0x66, 0x1e, 0x35, 0x4f, 0x36, 0x9b, 0x7d, 0xec, - 0x62, 0xcf, 0x60, 0xd8, 0xd2, 0x86, 0x1e, 0x61, 0x04, 0x5e, 0x09, 0x41, 0x9a, 0x31, 0xb4, 0x35, - 0x01, 0xd2, 0x4e, 0x36, 0x6b, 0x6f, 0xf4, 0x6d, 0x76, 0x34, 0xea, 0x6a, 0x26, 0x19, 0x34, 0xfb, - 0xa4, 0x4f, 0x9a, 0x02, 0xdb, 0x1d, 0xf5, 0xc4, 0x4a, 0x2c, 0xc4, 0x5f, 0x21, 0x47, 0xad, 0x91, - 0x38, 0xc8, 0x24, 0x1e, 0x4e, 0x39, 0xa7, 0xb6, 0x1d, 0x63, 0x06, 0x86, 0x79, 0x64, 0xbb, 0xd8, - 0x1b, 0x37, 0x87, 0xc7, 0x7d, 0x6e, 0xa0, 0xcd, 0x01, 0x66, 0x46, 0x9a, 0x57, 0x33, 0xcb, 0xcb, - 0x1b, 0xb9, 0xcc, 0x1e, 0xe0, 0x39, 0x87, 0xb7, 0xff, 0xcd, 0x81, 0x9a, 0x47, 0x78, 0x60, 0xcc, - 0xfa, 0x35, 0xfe, 0x56, 0x40, 0x61, 0xd7, 0x23, 0xee, 0x1e, 0xe9, 0xc2, 0xaf, 0x40, 0x91, 0xc7, - 0x63, 0x19, 0xcc, 0xa8, 0x2a, 0xeb, 0xca, 0x46, 0x79, 0xeb, 0x4d, 0x2d, 0xbe, 0xa5, 0x88, 0x56, - 0x1b, 0x1e, 0xf7, 0xb9, 0x81, 0x6a, 0x1c, 0xad, 0x9d, 0x6c, 0x6a, 0xf7, 0xbb, 0x8f, 0xb0, 0xc9, - 0xf6, 0x31, 0x33, 0x74, 0xf8, 0xd8, 0x57, 0x17, 0x02, 0x5f, 0x05, 0xb1, 0x0d, 0x45, 0xac, 0x50, - 0x07, 0x4b, 0x74, 0x88, 0xcd, 0xea, 0xa2, 0x60, 0x5f, 0xd7, 0x52, 0x72, 0xa0, 0xc9, 0x68, 0xda, - 0x43, 0x6c, 0xea, 0x2b, 0x92, 0x6d, 0x89, 0xaf, 0x90, 0xf0, 0x85, 0x7b, 0x60, 0x99, 0x32, 0x83, - 0x8d, 0x68, 0x35, 0x27, 0x58, 0x1a, 0x67, 0xb2, 0x08, 0xa4, 0xbe, 0x26, 0x79, 0x96, 0xc3, 0x35, - 0x92, 0x0c, 0x8d, 0x1f, 0x15, 0x50, 0x96, 0xc8, 0x96, 0x4d, 0x19, 0x7c, 0x38, 0x77, 0x03, 0xda, - 0xf9, 0x6e, 0x80, 0x7b, 0x8b, 0xef, 0xaf, 0xc8, 0x93, 0x8a, 0x13, 0x4b, 0xe2, 0xeb, 0x77, 0x40, - 0xde, 0x66, 0x78, 0x40, 0xab, 0x8b, 0xeb, 0xb9, 0x8d, 0xf2, 0xd6, 0x8d, 0xb3, 0x02, 0xd7, 0x57, - 0x25, 0x51, 0xfe, 0x1e, 0x77, 0x41, 0xa1, 0x67, 0xe3, 0xf7, 0xa5, 0x28, 0x60, 0x7e, 0x25, 0xf0, - 0x36, 0x28, 0xf2, 0xc4, 0x5a, 0x23, 0x07, 0x8b, 0x80, 0x4b, 0x71, 0x00, 0x6d, 0x69, 0x47, 0x11, - 0x02, 0x6e, 0x80, 0x22, 0xd7, 0xc2, 0x03, 0xe2, 0xe2, 0x6a, 0x51, 0xa0, 0x57, 0x38, 0xb2, 0x23, - 0x6d, 0x28, 0xda, 0x85, 0x87, 0xe0, 0x3a, 0x65, 0x86, 0xc7, 0x6c, 0xb7, 0x7f, 0x07, 0x1b, 0x96, - 0x63, 0xbb, 0xb8, 0x8d, 0x4d, 0xe2, 0x5a, 0x54, 0xe4, 0x2e, 0xa7, 0xbf, 0x14, 0xf8, 0xea, 0xf5, - 0x76, 0x3a, 0x04, 0x65, 0xf9, 0xc2, 0x87, 0xe0, 0xb2, 0x49, 0x5c, 0x73, 0xe4, 0x79, 0xd8, 0x35, - 0xc7, 0x07, 0xc4, 0xb1, 0xcd, 0xb1, 0x48, 0x63, 0x49, 0xd7, 0x64, 0xdc, 0x97, 0x77, 0x67, 0x01, - 0xa7, 0x69, 0x46, 0x34, 0x4f, 0x04, 0x5f, 0x01, 0x05, 0x3a, 0xa2, 0x43, 0xec, 0x5a, 
0xd5, 0xa5, - 0x75, 0x65, 0xa3, 0xa8, 0x97, 0x03, 0x5f, 0x2d, 0xb4, 0x43, 0x13, 0x9a, 0xec, 0xc1, 0xcf, 0x41, - 0xf9, 0x11, 0xe9, 0x76, 0xf0, 0x60, 0xe8, 0x18, 0x0c, 0x57, 0xf3, 0x22, 0xcf, 0xb7, 0x52, 0x93, - 0xb1, 0x17, 0xe3, 0x84, 0x1e, 0xaf, 0xc8, 0x20, 0xcb, 0x89, 0x0d, 0x94, 0x64, 0x83, 0x5f, 0x82, - 0x1a, 0x1d, 0x99, 0x26, 0xa6, 0xb4, 0x37, 0x72, 0xf6, 0x48, 0x97, 0x7e, 0x6c, 0x53, 0x46, 0xbc, - 0x71, 0xcb, 0x1e, 0xd8, 0xac, 0xba, 0xbc, 0xae, 0x6c, 0xe4, 0xf5, 0x7a, 0xe0, 0xab, 0xb5, 0x76, - 0x26, 0x0a, 0x9d, 0xc1, 0x00, 0x11, 0xb8, 0xd6, 0x33, 0x6c, 0x07, 0x5b, 0x73, 0xdc, 0x05, 0xc1, - 0x5d, 0x0b, 0x7c, 0xf5, 0xda, 0xdd, 0x54, 0x04, 0xca, 0xf0, 0x6c, 0xfc, 0xba, 0x08, 0x56, 0xa7, - 0xde, 0x0b, 0xfc, 0x04, 0x2c, 0x1b, 0x26, 0xb3, 0x4f, 0xb8, 0xa8, 0xb8, 0x54, 0x5f, 0x4e, 0xde, - 0x0e, 0xaf, 0x74, 0xf1, 0xab, 0x47, 0xb8, 0x87, 0x79, 0x12, 0x70, 0xfc, 0xc8, 0x76, 0x84, 0x2b, - 0x92, 0x14, 0xd0, 0x01, 0x15, 0xc7, 0xa0, 0x6c, 0xa2, 0x47, 0xae, 0x36, 0x91, 0x9f, 0xf2, 0xd6, - 0xeb, 0xe7, 0x7b, 0x5c, 0xdc, 0x43, 0xff, 0x5f, 0xe0, 0xab, 0x95, 0xd6, 0x0c, 0x0f, 0x9a, 0x63, - 0x86, 0x1e, 0x80, 0xc2, 0x16, 0x5d, 0xa1, 0x38, 0x2f, 0x7f, 0xe1, 0xf3, 0xae, 0x05, 0xbe, 0x0a, - 0x5b, 0x73, 0x4c, 0x28, 0x85, 0xbd, 0xf1, 0x97, 0x02, 0x72, 0x2f, 0xa6, 0x80, 0x7e, 0x38, 0x55, - 0x40, 0x6f, 0x64, 0x89, 0x36, 0xb3, 0x78, 0xde, 0x9d, 0x29, 0x9e, 0xf5, 0x4c, 0x86, 0xb3, 0x0b, - 0xe7, 0x6f, 0x39, 0xb0, 0xb2, 0x47, 0xba, 0xbb, 0xc4, 0xb5, 0x6c, 0x66, 0x13, 0x17, 0x6e, 0x83, - 0x25, 0x36, 0x1e, 0x4e, 0x8a, 0xd0, 0xfa, 0xe4, 0xe8, 0xce, 0x78, 0x88, 0x4f, 0x7d, 0xb5, 0x92, - 0xc4, 0x72, 0x1b, 0x12, 0x68, 0xd8, 0x8a, 0xc2, 0x59, 0x14, 0x7e, 0xdb, 0xd3, 0xc7, 0x9d, 0xfa, - 0x6a, 0x4a, 0x8b, 0xd5, 0x22, 0xa6, 0xe9, 0xa0, 0x60, 0x1f, 0xac, 0xf2, 0xe4, 0x1c, 0x78, 0xa4, - 0x1b, 0xaa, 0x2c, 0x77, 0xe1, 0xac, 0x5f, 0x95, 0x01, 0xac, 0xb6, 0x92, 0x44, 0x68, 0x9a, 0x17, - 0x9e, 0x84, 0x1a, 0xeb, 0x78, 0x86, 0x4b, 0xc3, 0x4f, 0x7a, 0x3e, 0x4d, 0xd7, 0xe4, 0x69, 0x42, - 0x67, 0xd3, 0x6c, 0x28, 0xe5, 0x04, 0xf8, 0x2a, 0x58, 0xf6, 0xb0, 0x41, 0x89, 0x2b, 0xf4, 0x5c, - 0x8a, 0xb3, 0x83, 0x84, 0x15, 0xc9, 0x5d, 0xf8, 0x1a, 0x28, 0x0c, 0x30, 0xa5, 0x46, 0x1f, 0x8b, - 0x8a, 0x53, 0xd2, 0x2f, 0x49, 0x60, 0x61, 0x3f, 0x34, 0xa3, 0xc9, 0x7e, 0xe3, 0x07, 0x05, 0x14, - 0x5e, 0x4c, 0xf7, 0xfb, 0x60, 0xba, 0xfb, 0x55, 0xb3, 0x94, 0x97, 0xd1, 0xf9, 0x7e, 0xca, 0x8b, - 0x40, 0x45, 0xd7, 0xdb, 0x04, 0xe5, 0xa1, 0xe1, 0x19, 0x8e, 0x83, 0x1d, 0x9b, 0x0e, 0x44, 0xac, - 0x79, 0xfd, 0x12, 0xaf, 0xcb, 0x07, 0xb1, 0x19, 0x25, 0x31, 0xdc, 0xc5, 0x24, 0x83, 0xa1, 0x83, - 0xf9, 0x65, 0x86, 0x72, 0x93, 0x2e, 0xbb, 0xb1, 0x19, 0x25, 0x31, 0xf0, 0x3e, 0xb8, 0x1a, 0x56, - 0xb0, 0xd9, 0x0e, 0x98, 0x13, 0x1d, 0xf0, 0xff, 0x81, 0xaf, 0x5e, 0xdd, 0x49, 0x03, 0xa0, 0x74, - 0x3f, 0xb8, 0x0d, 0x56, 0xba, 0x86, 0x79, 0x4c, 0x7a, 0xbd, 0x64, 0xc5, 0xae, 0x04, 0xbe, 0xba, - 0xa2, 0x27, 0xec, 0x68, 0x0a, 0x05, 0xbf, 0x00, 0x45, 0x8a, 0x1d, 0x6c, 0x32, 0xe2, 0x49, 0x89, - 0xbd, 0x75, 0xce, 0xac, 0x18, 0x5d, 0xec, 0xb4, 0xa5, 0x6b, 0xd8, 0xe9, 0x27, 0x2b, 0x14, 0x51, - 0xc2, 0xf7, 0xc0, 0xda, 0xc0, 0x70, 0x47, 0x46, 0x84, 0x14, 0xda, 0x2a, 0xea, 0x30, 0xf0, 0xd5, - 0xb5, 0xfd, 0xa9, 0x1d, 0x34, 0x83, 0x84, 0x9f, 0x82, 0x22, 0x9b, 0xb4, 0xd1, 0x65, 0x11, 0x5a, - 0x6a, 0xa3, 0x38, 0x20, 0xd6, 0x54, 0x17, 0x8d, 0x54, 0x12, 0xb5, 0xd0, 0x88, 0x86, 0x0f, 0x1e, - 0x8c, 0x39, 0xf2, 0xc6, 0x76, 0x7a, 0x0c, 0x7b, 0x77, 0x6d, 0xd7, 0xa6, 0x47, 0xd8, 0x12, 0x13, - 0x4b, 0x3e, 0x1c, 0x3c, 0x3a, 0x9d, 0x56, 0x1a, 0x04, 0x65, 0xf9, 0xc2, 0x16, 0x58, 0x8b, 0x53, - 0xbb, 0x4f, 
0x2c, 0x5c, 0x2d, 0x89, 0x87, 0x71, 0x8b, 0x7f, 0xe5, 0xee, 0xd4, 0xce, 0xe9, 0x9c, - 0x05, 0xcd, 0xf8, 0x26, 0x07, 0x0d, 0x90, 0x3d, 0x68, 0x34, 0xbe, 0xcf, 0x83, 0x52, 0xdc, 0x53, - 0x0f, 0x01, 0x30, 0x27, 0x85, 0x8b, 0xca, 0xbe, 0x7a, 0x33, 0xeb, 0x11, 0x44, 0x25, 0x2e, 0xee, - 0x07, 0x91, 0x89, 0xa2, 0x04, 0x11, 0xfc, 0x0c, 0x94, 0xc4, 0xb4, 0x25, 0x4a, 0xd0, 0xe2, 0x85, - 0x4b, 0xd0, 0x6a, 0xe0, 0xab, 0xa5, 0xf6, 0x84, 0x00, 0xc5, 0x5c, 0xb0, 0x97, 0xbc, 0xb2, 0xe7, - 0x2c, 0xa7, 0x70, 0xfa, 0x7a, 0xc5, 0x11, 0x33, 0xac, 0xbc, 0xa8, 0xc9, 0x59, 0x63, 0x49, 0x24, - 0x38, 0x6b, 0x8c, 0x68, 0x82, 0x92, 0x98, 0x8b, 0xb0, 0x85, 0x2d, 0xa1, 0xd1, 0xbc, 0x7e, 0x59, - 0x42, 0x4b, 0xed, 0xc9, 0x06, 0x8a, 0x31, 0x9c, 0x38, 0x1c, 0x78, 0xe4, 0xd8, 0x15, 0x11, 0x87, - 0xe3, 0x11, 0x92, 0xbb, 0xf0, 0x0e, 0xa8, 0xc8, 0x90, 0xb0, 0x75, 0xcf, 0xb5, 0xf0, 0xd7, 0x98, - 0x8a, 0xa7, 0x59, 0xd2, 0xab, 0xd2, 0xa3, 0xb2, 0x3b, 0xb3, 0x8f, 0xe6, 0x3c, 0xe0, 0xb7, 0x0a, - 0xb8, 0x3e, 0x72, 0x4d, 0x32, 0x72, 0x19, 0xb6, 0x3a, 0xd8, 0x1b, 0xd8, 0x2e, 0xff, 0x99, 0x75, - 0x40, 0x2c, 0x2a, 0x94, 0x5b, 0xde, 0xba, 0x9d, 0x9a, 0xec, 0xc3, 0x74, 0x9f, 0x50, 0xe7, 0x19, - 0x9b, 0x28, 0xeb, 0x24, 0xa8, 0x82, 0xbc, 0x87, 0x0d, 0x6b, 0x2c, 0xe4, 0x9d, 0xd7, 0x4b, 0xbc, - 0x8c, 0x22, 0x6e, 0x40, 0xa1, 0xbd, 0xf1, 0xb3, 0x02, 0x2e, 0xcd, 0x4c, 0xb5, 0xff, 0xfd, 0xb1, - 0xa5, 0xf1, 0x8b, 0x02, 0xb2, 0xee, 0x02, 0x1e, 0x24, 0x75, 0xc1, 0x9f, 0x55, 0x49, 0xdf, 0x9a, - 0xd2, 0xc4, 0xa9, 0xaf, 0xde, 0xcc, 0xfa, 0x75, 0xcc, 0xa7, 0x10, 0xaa, 0x1d, 0xde, 0xbb, 0x93, - 0x14, 0xce, 0x47, 0x91, 0x70, 0x16, 0x05, 0x5d, 0x33, 0x16, 0xcd, 0xf9, 0xb8, 0xa4, 0xbb, 0xfe, - 0xee, 0xe3, 0x67, 0xf5, 0x85, 0x27, 0xcf, 0xea, 0x0b, 0x4f, 0x9f, 0xd5, 0x17, 0xbe, 0x09, 0xea, - 0xca, 0xe3, 0xa0, 0xae, 0x3c, 0x09, 0xea, 0xca, 0xd3, 0xa0, 0xae, 0xfc, 0x11, 0xd4, 0x95, 0xef, - 0xfe, 0xac, 0x2f, 0x3c, 0xb8, 0x92, 0xf2, 0xef, 0x8a, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xae, - 0xfd, 0x54, 0xb2, 0xdd, 0x10, 0x00, 0x00, + // 1797 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x8f, 0x93, 0x38, 0xb1, 0xcb, 0xf9, 0xf0, 0xd4, 0x64, 0x66, 0x4c, 0x58, 0xb9, 0xb3, 0x9e, + 0xdd, 0x55, 0x16, 0x2d, 0xed, 0x9d, 0xec, 0x88, 0xe5, 0x5b, 0x3b, 0x9d, 0x61, 0x96, 0x09, 0xce, + 0x8e, 0x29, 0x67, 0x40, 0x5a, 0x16, 0x44, 0xb9, 0xbb, 0xec, 0xf4, 0xa6, 0xdd, 0xd5, 0x74, 0x55, + 0x47, 0x93, 0x0b, 0x42, 0xe2, 0x0f, 0x80, 0xbf, 0x82, 0x23, 0x17, 0x38, 0xc3, 0x0d, 0xcd, 0x71, + 0xc5, 0x69, 0xc5, 0xa1, 0xc5, 0x34, 0x7f, 0x00, 0xf7, 0x20, 0x24, 0x54, 0xd5, 0xe5, 0xfe, 0x72, + 0x77, 0xc8, 0xac, 0xc4, 0x88, 0x5b, 0xfa, 0xbd, 0xdf, 0xfb, 0xd5, 0xc7, 0x7b, 0xf5, 0x7b, 0x2f, + 0x06, 0xdf, 0x3e, 0xfb, 0x3a, 0xd3, 0x6d, 0xda, 0x3f, 0x0b, 0xc6, 0xc4, 0x77, 0x09, 0x27, 0xac, + 0x7f, 0x4e, 0x5c, 0x8b, 0xfa, 0x7d, 0xe5, 0xc0, 0x9e, 0xdd, 0x1f, 0x63, 0x6e, 0x9e, 0xf6, 0xcf, + 0xef, 0xf5, 0xa7, 0xc4, 0x25, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0x9f, 0x72, 0x0a, 0x6f, 0xc6, 0x20, + 0x1d, 0x7b, 0xb6, 0x2e, 0x41, 0xfa, 0xf9, 0xbd, 0xdd, 0xaf, 0x4e, 0x6d, 0x7e, 0x1a, 0x8c, 0x75, + 0x93, 0xce, 0xfa, 0x53, 0x3a, 0xa5, 0x7d, 0x89, 0x1d, 0x07, 0x13, 0xf9, 0x25, 0x3f, 0xe4, 0x5f, + 0x31, 0xc7, 0x6e, 0x2f, 0xb3, 0x90, 0x49, 0x7d, 0x52, 0xb2, 0xce, 0xee, 0xfd, 0x14, 0x33, 0xc3, + 0xe6, 0xa9, 0xed, 0x12, 0xff, 0xa2, 0xef, 0x9d, 0x4d, 0x85, 0x81, 0xf5, 0x67, 0x84, 0xe3, 0xb2, + 0xa8, 0x7e, 0x55, 0x94, 0x1f, 0xb8, 0xdc, 0x9e, 0x91, 0x85, 0x80, 0xaf, 0xfd, 0xb7, 0x00, 0x66, + 0x9e, 0x92, 0x19, 0x2e, 0xc6, 0xf5, 0xfe, 
0x55, 0x03, 0xeb, 0x87, 0x3e, 0x75, 0x8f, 0xe8, 0x18, + 0xfe, 0x1c, 0x34, 0xc4, 0x7e, 0x2c, 0xcc, 0x71, 0xa7, 0xb6, 0x57, 0xdb, 0x6f, 0x1d, 0xbc, 0xab, + 0xa7, 0xb7, 0x94, 0xd0, 0xea, 0xde, 0xd9, 0x54, 0x18, 0x98, 0x2e, 0xd0, 0xfa, 0xf9, 0x3d, 0xfd, + 0xc9, 0xf8, 0x53, 0x62, 0xf2, 0x63, 0xc2, 0xb1, 0x01, 0x9f, 0x87, 0xda, 0x52, 0x14, 0x6a, 0x20, + 0xb5, 0xa1, 0x84, 0x15, 0x1a, 0x60, 0x95, 0x79, 0xc4, 0xec, 0x2c, 0x4b, 0xf6, 0x3d, 0xbd, 0x24, + 0x07, 0xba, 0xda, 0xcd, 0xc8, 0x23, 0xa6, 0xb1, 0xa1, 0xd8, 0x56, 0xc5, 0x17, 0x92, 0xb1, 0xf0, + 0x08, 0xac, 0x31, 0x8e, 0x79, 0xc0, 0x3a, 0x2b, 0x92, 0xa5, 0x77, 0x25, 0x8b, 0x44, 0x1a, 0x5b, + 0x8a, 0x67, 0x2d, 0xfe, 0x46, 0x8a, 0xa1, 0xf7, 0xfb, 0x1a, 0x68, 0x29, 0xe4, 0xc0, 0x66, 0x1c, + 0x7e, 0xb2, 0x70, 0x03, 0xfa, 0xf5, 0x6e, 0x40, 0x44, 0xcb, 0xf3, 0xb7, 0xd5, 0x4a, 0x8d, 0xb9, + 0x25, 0x73, 0xfa, 0x07, 0xa0, 0x6e, 0x73, 0x32, 0x63, 0x9d, 0xe5, 0xbd, 0x95, 0xfd, 0xd6, 0xc1, + 0x6b, 0x57, 0x6d, 0xdc, 0xd8, 0x54, 0x44, 0xf5, 0xc7, 0x22, 0x04, 0xc5, 0x91, 0xbd, 0xbf, 0xae, + 0x26, 0x1b, 0x16, 0x57, 0x02, 0xdf, 0x01, 0x0d, 0x91, 0x58, 0x2b, 0x70, 0x88, 0xdc, 0x70, 0x33, + 0xdd, 0xc0, 0x48, 0xd9, 0x51, 0x82, 0x80, 0xfb, 0xa0, 0x21, 0x6a, 0xe1, 0x63, 0xea, 0x92, 0x4e, + 0x43, 0xa2, 0x37, 0x04, 0xf2, 0x44, 0xd9, 0x50, 0xe2, 0x85, 0x4f, 0xc1, 0x1d, 0xc6, 0xb1, 0xcf, + 0x6d, 0x77, 0xfa, 0x90, 0x60, 0xcb, 0xb1, 0x5d, 0x32, 0x22, 0x26, 0x75, 0x2d, 0x26, 0x73, 0xb7, + 0x62, 0x7c, 0x39, 0x0a, 0xb5, 0x3b, 0xa3, 0x72, 0x08, 0xaa, 0x8a, 0x85, 0x9f, 0x80, 0x1b, 0x26, + 0x75, 0xcd, 0xc0, 0xf7, 0x89, 0x6b, 0x5e, 0x0c, 0xa9, 0x63, 0x9b, 0x17, 0x32, 0x8d, 0x4d, 0x43, + 0x57, 0xfb, 0xbe, 0x71, 0x58, 0x04, 0x5c, 0x96, 0x19, 0xd1, 0x22, 0x11, 0x7c, 0x13, 0xac, 0xb3, + 0x80, 0x79, 0xc4, 0xb5, 0x3a, 0xab, 0x7b, 0xb5, 0xfd, 0x86, 0xd1, 0x8a, 0x42, 0x6d, 0x7d, 0x14, + 0x9b, 0xd0, 0xdc, 0x07, 0x7f, 0x02, 0x5a, 0x9f, 0xd2, 0xf1, 0x09, 0x99, 0x79, 0x0e, 0xe6, 0xa4, + 0x53, 0x97, 0x79, 0x7e, 0xa3, 0x34, 0x19, 0x47, 0x29, 0x4e, 0xd6, 0xe3, 0x4d, 0xb5, 0xc9, 0x56, + 0xc6, 0x81, 0xb2, 0x6c, 0xf0, 0x67, 0x60, 0x97, 0x05, 0xa6, 0x49, 0x18, 0x9b, 0x04, 0xce, 0x11, + 0x1d, 0xb3, 0xef, 0xdb, 0x8c, 0x53, 0xff, 0x62, 0x60, 0xcf, 0x6c, 0xde, 0x59, 0xdb, 0xab, 0xed, + 0xd7, 0x8d, 0x6e, 0x14, 0x6a, 0xbb, 0xa3, 0x4a, 0x14, 0xba, 0x82, 0x01, 0x22, 0x70, 0x7b, 0x82, + 0x6d, 0x87, 0x58, 0x0b, 0xdc, 0xeb, 0x92, 0x7b, 0x37, 0x0a, 0xb5, 0xdb, 0x8f, 0x4a, 0x11, 0xa8, + 0x22, 0xb2, 0xf7, 0xa7, 0x65, 0xb0, 0x99, 0x7b, 0x2f, 0xf0, 0x07, 0x60, 0x0d, 0x9b, 0xdc, 0x3e, + 0x17, 0x45, 0x25, 0x4a, 0xf5, 0x6e, 0xf6, 0x76, 0x84, 0xd2, 0xa5, 0xaf, 0x1e, 0x91, 0x09, 0x11, + 0x49, 0x20, 0xe9, 0x23, 0x7b, 0x20, 0x43, 0x91, 0xa2, 0x80, 0x0e, 0x68, 0x3b, 0x98, 0xf1, 0x79, + 0x3d, 0x8a, 0x6a, 0x93, 0xf9, 0x69, 0x1d, 0x7c, 0xe5, 0x7a, 0x8f, 0x4b, 0x44, 0x18, 0x3b, 0x51, + 0xa8, 0xb5, 0x07, 0x05, 0x1e, 0xb4, 0xc0, 0x0c, 0x7d, 0x00, 0xa5, 0x2d, 0xb9, 0x42, 0xb9, 0x5e, + 0xfd, 0xa5, 0xd7, 0xbb, 0x1d, 0x85, 0x1a, 0x1c, 0x2c, 0x30, 0xa1, 0x12, 0xf6, 0xde, 0x3f, 0x6b, + 0x60, 0xe5, 0xd5, 0x08, 0xe8, 0x77, 0x73, 0x02, 0xfa, 0x5a, 0x55, 0xd1, 0x56, 0x8a, 0xe7, 0xa3, + 0x82, 0x78, 0x76, 0x2b, 0x19, 0xae, 0x16, 0xce, 0xbf, 0xac, 0x80, 0x8d, 0x23, 0x3a, 0x3e, 0xa4, + 0xae, 0x65, 0x73, 0x9b, 0xba, 0xf0, 0x3e, 0x58, 0xe5, 0x17, 0xde, 0x5c, 0x84, 0xf6, 0xe6, 0x4b, + 0x9f, 0x5c, 0x78, 0xe4, 0x32, 0xd4, 0xda, 0x59, 0xac, 0xb0, 0x21, 0x89, 0x86, 0x83, 0x64, 0x3b, + 0xcb, 0x32, 0xee, 0x7e, 0x7e, 0xb9, 0xcb, 0x50, 0x2b, 0x69, 0xb1, 0x7a, 0xc2, 0x94, 0xdf, 0x14, + 0x9c, 0x82, 0x4d, 0x91, 0x9c, 0xa1, 0x4f, 0xc7, 0x71, 0x95, 0xad, 
0xbc, 0x74, 0xd6, 0x6f, 0xa9, + 0x0d, 0x6c, 0x0e, 0xb2, 0x44, 0x28, 0xcf, 0x0b, 0xcf, 0xe3, 0x1a, 0x3b, 0xf1, 0xb1, 0xcb, 0xe2, + 0x23, 0x7d, 0xb1, 0x9a, 0xde, 0x55, 0xab, 0xc9, 0x3a, 0xcb, 0xb3, 0xa1, 0x92, 0x15, 0xe0, 0x5b, + 0x60, 0xcd, 0x27, 0x98, 0x51, 0x57, 0xd6, 0x73, 0x33, 0xcd, 0x0e, 0x92, 0x56, 0xa4, 0xbc, 0xf0, + 0x6d, 0xb0, 0x3e, 0x23, 0x8c, 0xe1, 0x29, 0x91, 0x8a, 0xd3, 0x34, 0xb6, 0x15, 0x70, 0xfd, 0x38, + 0x36, 0xa3, 0xb9, 0xbf, 0xf7, 0xbb, 0x1a, 0x58, 0x7f, 0x35, 0xdd, 0xef, 0x3b, 0xf9, 0xee, 0xd7, + 0xa9, 0xaa, 0xbc, 0x8a, 0xce, 0xf7, 0x9b, 0x86, 0xdc, 0xa8, 0xec, 0x7a, 0xf7, 0x40, 0xcb, 0xc3, + 0x3e, 0x76, 0x1c, 0xe2, 0xd8, 0x6c, 0x26, 0xf7, 0x5a, 0x37, 0xb6, 0x85, 0x2e, 0x0f, 0x53, 0x33, + 0xca, 0x62, 0x44, 0x88, 0x49, 0x67, 0x9e, 0x43, 0xc4, 0x65, 0xc6, 0xe5, 0xa6, 0x42, 0x0e, 0x53, + 0x33, 0xca, 0x62, 0xe0, 0x13, 0x70, 0x2b, 0x56, 0xb0, 0x62, 0x07, 0x5c, 0x91, 0x1d, 0xf0, 0x4b, + 0x51, 0xa8, 0xdd, 0x7a, 0x50, 0x06, 0x40, 0xe5, 0x71, 0x70, 0x0a, 0xda, 0x1e, 0xb5, 0x84, 0x38, + 0x07, 0x3e, 0x51, 0xcd, 0xaf, 0x25, 0xef, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x16, 0xc0, 0xb1, 0x06, + 0x16, 0xad, 0x68, 0x81, 0x14, 0xde, 0x07, 0x1b, 0x63, 0x6c, 0x9e, 0xd1, 0xc9, 0x24, 0xdb, 0x1a, + 0xda, 0x51, 0xa8, 0x6d, 0x18, 0x19, 0x3b, 0xca, 0xa1, 0xe0, 0x00, 0xec, 0x64, 0xbf, 0x87, 0xc4, + 0x7f, 0xec, 0x5a, 0xe4, 0x59, 0x67, 0x43, 0x46, 0x77, 0xa2, 0x50, 0xdb, 0x31, 0x4a, 0xfc, 0xa8, + 0x34, 0x0a, 0x7e, 0x00, 0xda, 0x33, 0xfc, 0x2c, 0xee, 0x44, 0xd2, 0x42, 0x58, 0x67, 0x53, 0x32, + 0xc9, 0x53, 0x1c, 0x17, 0x7c, 0x68, 0x01, 0x0d, 0x7f, 0x0a, 0x1a, 0x8c, 0x38, 0xc4, 0xe4, 0xd4, + 0x57, 0x6f, 0xeb, 0xbd, 0x6b, 0x96, 0x23, 0x1e, 0x13, 0x67, 0xa4, 0x42, 0xe3, 0x11, 0x67, 0xfe, + 0x85, 0x12, 0x4a, 0xf8, 0x4d, 0xb0, 0x35, 0xc3, 0x6e, 0x80, 0x13, 0xa4, 0x7c, 0x54, 0x0d, 0x03, + 0x46, 0xa1, 0xb6, 0x75, 0x9c, 0xf3, 0xa0, 0x02, 0x12, 0xfe, 0x10, 0x34, 0xf8, 0x7c, 0x7e, 0x58, + 0x93, 0x5b, 0x2b, 0xed, 0x90, 0x43, 0x6a, 0xe5, 0xc6, 0x87, 0xe4, 0x79, 0x24, 0xb3, 0x43, 0x42, + 0x23, 0x26, 0x2e, 0xce, 0x1d, 0x55, 0x2a, 0x0f, 0x26, 0x9c, 0xf8, 0x8f, 0x6c, 0xd7, 0x66, 0xa7, + 0xc4, 0x92, 0xa3, 0x5a, 0x3d, 0x9e, 0xb8, 0x4e, 0x4e, 0x06, 0x65, 0x10, 0x54, 0x15, 0x0b, 0x07, + 0x60, 0x2b, 0xad, 0xe9, 0x63, 0x6a, 0x91, 0x4e, 0x53, 0x2a, 0xc2, 0x1b, 0xe2, 0x94, 0x87, 0x39, + 0xcf, 0xe5, 0x82, 0x05, 0x15, 0x62, 0xb3, 0x13, 0x16, 0xb8, 0x62, 0xc2, 0xb2, 0xc0, 0x8e, 0x47, + 0x2d, 0x44, 0x3c, 0x07, 0x9b, 0x64, 0x46, 0x5c, 0xae, 0x8a, 0x7d, 0x4b, 0x2e, 0xfd, 0xae, 0xa8, + 0xa4, 0x61, 0x89, 0xff, 0xb2, 0xc2, 0x8e, 0x4a, 0xd9, 0x7a, 0xff, 0xae, 0x83, 0x66, 0x3a, 0xb2, + 0x3c, 0x05, 0xc0, 0x9c, 0xf7, 0x05, 0xa6, 0xc6, 0x96, 0xd7, 0xab, 0x34, 0x26, 0xe9, 0x20, 0x69, + 0xbb, 0x4d, 0x4c, 0x0c, 0x65, 0x88, 0xe0, 0x8f, 0x41, 0x53, 0x0e, 0xb3, 0x52, 0xe1, 0x97, 0x5f, + 0x5a, 0xe1, 0x37, 0xa3, 0x50, 0x6b, 0x8e, 0xe6, 0x04, 0x28, 0xe5, 0x82, 0x93, 0x6c, 0x62, 0xbe, + 0x60, 0xb7, 0x82, 0xf9, 0x24, 0xca, 0x25, 0x0a, 0xac, 0xa2, 0x67, 0xa8, 0x51, 0x6e, 0x55, 0x96, + 0x51, 0xd5, 0x94, 0xd6, 0x07, 0x4d, 0x39, 0x76, 0x12, 0x8b, 0x58, 0xf2, 0x25, 0xd4, 0x8d, 0x1b, + 0x0a, 0xda, 0x1c, 0xcd, 0x1d, 0x28, 0xc5, 0x08, 0xe2, 0x78, 0x9e, 0x54, 0x53, 0x6d, 0x42, 0x1c, + 0xbf, 0x62, 0xa4, 0xbc, 0x42, 0x79, 0x39, 0xf1, 0x67, 0xb6, 0x8b, 0xc5, 0x7f, 0x04, 0x52, 0xf0, + 0x94, 0xf2, 0x9e, 0xa4, 0x66, 0x94, 0xc5, 0xc0, 0x87, 0xa0, 0xad, 0x4e, 0x91, 0x6a, 0xc7, 0xba, + 0xac, 0x9d, 0x8e, 0x5a, 0xa4, 0x7d, 0x58, 0xf0, 0xa3, 0x85, 0x08, 0xf8, 0x3e, 0xd8, 0x9c, 0xe4, + 0xe4, 0x07, 0x48, 0x8a, 0x1b, 0xa2, 0xbd, 0xe7, 0xb5, 0x27, 0x8f, 0x83, 0xbf, 0xae, 0x81, 
0x3b, + 0x81, 0x6b, 0xd2, 0xc0, 0xe5, 0xc4, 0x9a, 0x6f, 0x92, 0x58, 0x43, 0x6a, 0x31, 0xf9, 0x16, 0x5b, + 0x07, 0xef, 0x94, 0x16, 0xd6, 0xd3, 0xf2, 0x98, 0xf8, 0xe5, 0x56, 0x38, 0x51, 0xd5, 0x4a, 0x50, + 0x03, 0x75, 0x9f, 0x60, 0xeb, 0x42, 0x3e, 0xd8, 0xba, 0xd1, 0x14, 0x1d, 0x11, 0x09, 0x03, 0x8a, + 0xed, 0xbd, 0x3f, 0xd4, 0xc0, 0x76, 0xe1, 0x1f, 0x94, 0xff, 0xff, 0x09, 0xb4, 0x37, 0x06, 0x0b, + 0x1d, 0x0c, 0x7e, 0x04, 0xea, 0x7e, 0xe0, 0x90, 0xf9, 0xb3, 0x7d, 0xfb, 0x5a, 0xdd, 0x10, 0x05, + 0x0e, 0x49, 0x67, 0x05, 0xf1, 0xc5, 0x50, 0x4c, 0xd3, 0xfb, 0x5b, 0x0d, 0xbc, 0x55, 0x84, 0x3f, + 0x71, 0xbf, 0xf7, 0xcc, 0xe6, 0x87, 0xd4, 0x22, 0x0c, 0x91, 0x5f, 0x04, 0xb6, 0x2f, 0xa5, 0x44, + 0x14, 0x89, 0x49, 0x5d, 0x8e, 0xc5, 0xb5, 0x7c, 0x84, 0x67, 0xf3, 0x01, 0x56, 0x16, 0xc9, 0x61, + 0xd6, 0x81, 0xf2, 0x38, 0x38, 0x02, 0x0d, 0xea, 0x11, 0x1f, 0x8b, 0xc6, 0x11, 0x0f, 0xaf, 0xef, + 0xcf, 0xd5, 0xfd, 0x89, 0xb2, 0x5f, 0x86, 0xda, 0xdd, 0x2b, 0xb6, 0x31, 0x87, 0xa1, 0x84, 0x08, + 0xf6, 0xc0, 0xda, 0x39, 0x76, 0x02, 0x22, 0x66, 0x8c, 0x95, 0xfd, 0xba, 0x01, 0xc4, 0x7b, 0xfa, + 0x91, 0xb4, 0x20, 0xe5, 0xe9, 0xfd, 0xb9, 0xf4, 0x70, 0x43, 0x6a, 0xa5, 0x0a, 0x36, 0xc4, 0x9c, + 0x13, 0xdf, 0x85, 0x1f, 0xe6, 0x86, 0xf2, 0xf7, 0x0a, 0x43, 0xf9, 0xdd, 0x92, 0xd1, 0x3a, 0x4b, + 0xf3, 0xbf, 0x9a, 0xd3, 0x7b, 0xcf, 0x97, 0xc1, 0x4e, 0x59, 0x36, 0xe1, 0x07, 0xb1, 0x56, 0x51, + 0x57, 0xed, 0x78, 0x3f, 0xab, 0x55, 0xd4, 0xbd, 0x0c, 0xb5, 0xdb, 0xc5, 0xb8, 0xd8, 0x83, 0x54, + 0x1c, 0x74, 0x41, 0x8b, 0xa6, 0x37, 0xac, 0x8a, 0xf4, 0x5b, 0xd7, 0xaa, 0xa7, 0xf2, 0x02, 0x89, + 0x95, 0x2a, 0xeb, 0xcb, 0x2e, 0x00, 0x7f, 0x09, 0xb6, 0x69, 0xfe, 0xee, 0x65, 0xe6, 0xae, 0xbf, + 0x66, 0x59, 0xde, 0x8c, 0x3b, 0xea, 0xdc, 0xdb, 0x05, 0x3f, 0x2a, 0x2e, 0xd6, 0xfb, 0x63, 0x0d, + 0x54, 0x29, 0x0b, 0x1c, 0x66, 0x15, 0x5d, 0xbc, 0xac, 0xa6, 0x71, 0x90, 0x53, 0xf3, 0xcb, 0x50, + 0x7b, 0xbd, 0xea, 0x67, 0x43, 0x91, 0x76, 0xa6, 0x3f, 0x7d, 0xfc, 0x30, 0x2b, 0xf9, 0x1f, 0x26, + 0x92, 0xbf, 0x2c, 0xe9, 0xfa, 0xa9, 0xdc, 0x5f, 0x8f, 0x4b, 0x85, 0x1b, 0xdf, 0x78, 0xfe, 0xa2, + 0xbb, 0xf4, 0xd9, 0x8b, 0xee, 0xd2, 0xe7, 0x2f, 0xba, 0x4b, 0xbf, 0x8a, 0xba, 0xb5, 0xe7, 0x51, + 0xb7, 0xf6, 0x59, 0xd4, 0xad, 0x7d, 0x1e, 0x75, 0x6b, 0x7f, 0x8f, 0xba, 0xb5, 0xdf, 0xfe, 0xa3, + 0xbb, 0xf4, 0xf1, 0xcd, 0x92, 0xdf, 0x71, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x43, 0xdf, 0xa6, + 0x7c, 0xf6, 0x15, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -887,6 +1030,35 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PodReplacementPolicy != nil { + i -= len(*m.PodReplacementPolicy) + copy(dAtA[i:], *m.PodReplacementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodReplacementPolicy))) + i-- + dAtA[i] = 0x72 + } + if m.MaxFailedIndexes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxFailedIndexes)) + i-- + dAtA[i] = 0x68 + } + if m.BackoffLimitPerIndex != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimitPerIndex)) + i-- + dAtA[i] = 0x60 + } + if m.PodFailurePolicy != nil { + { + size, err := m.PodFailurePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.Suspend != nil { i-- if *m.Suspend { @@ -984,6 +1156,18 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Terminating)) + i-- + dAtA[i] = 0x58 + } + if 
m.FailedIndexes != nil { + i -= len(*m.FailedIndexes) + copy(dAtA[i:], *m.FailedIndexes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailedIndexes))) + i-- + dAtA[i] = 0x52 + } if m.Ready != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready)) i-- @@ -1099,6 +1283,172 @@ func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodFailurePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodFailurePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodFailurePolicyOnExitCodesRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodFailurePolicyOnExitCodesRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicyOnExitCodesRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + if m.ContainerName != nil { + i -= len(*m.ContainerName) + copy(dAtA[i:], *m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ContainerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodFailurePolicyOnPodConditionsPattern) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodFailurePolicyOnPodConditionsPattern) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicyOnPodConditionsPattern) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodFailurePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodFailurePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OnPodConditions) > 0 { + for iNdEx := len(m.OnPodConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OnPodConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.OnExitCodes != nil { + { + size, err := m.OnExitCodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1327,6 +1677,20 @@ func (m *JobSpec) Size() (n int) { if m.Suspend != nil { n += 2 } + if m.PodFailurePolicy != nil { + l = m.PodFailurePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BackoffLimitPerIndex != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimitPerIndex)) + } + if m.MaxFailedIndexes != nil { + n += 1 + sovGenerated(uint64(*m.MaxFailedIndexes)) + } + if m.PodReplacementPolicy != nil { + l = len(*m.PodReplacementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1362,6 +1726,13 @@ func (m *JobStatus) Size() (n int) { if m.Ready != nil { n += 1 + sovGenerated(uint64(*m.Ready)) } + if m.FailedIndexes != nil { + l = len(*m.FailedIndexes) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminating != nil { + n += 1 + sovGenerated(uint64(*m.Terminating)) + } return n } @@ -1378,37 +1749,106 @@ func (m *JobTemplateSpec) Size() (n int) { return n } -func (m *UncountedTerminatedPods) Size() (n int) { +func (m *PodFailurePolicy) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Succeeded) > 0 { - for _, s := range m.Succeeded { - l = len(s) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Failed) > 0 { - for _, s := range m.Failed { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodFailurePolicyOnExitCodesRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ContainerName != nil { + l = len(*m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, e := range m.Values { + n += 1 + sovGenerated(uint64(e)) } } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CronJob) String() string { - if this == nil { - return "nil" - } +func (m *PodFailurePolicyOnPodConditionsPattern) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodFailurePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.OnExitCodes != nil { + l = m.OnExitCodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.OnPodConditions) > 0 { + for _, e := range m.OnPodConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*UncountedTerminatedPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Succeeded) > 0 { + for _, s := range m.Succeeded { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Failed) > 0 { + for _, s := range m.Failed { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } s := strings.Join([]string{`&CronJob{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, @@ -1525,6 +1965,10 @@ func (this *JobSpec) String() string { `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `CompletionMode:` + valueToStringGenerated(this.CompletionMode) + `,`, `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `PodFailurePolicy:` + strings.Replace(this.PodFailurePolicy.String(), "PodFailurePolicy", "PodFailurePolicy", 1) + `,`, + `BackoffLimitPerIndex:` + valueToStringGenerated(this.BackoffLimitPerIndex) + `,`, + `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`, + `PodReplacementPolicy:` + valueToStringGenerated(this.PodReplacementPolicy) + `,`, `}`, }, "") return s @@ -1548,6 +1992,8 @@ func (this *JobStatus) String() string { `CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`, `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `FailedIndexes:` + valueToStringGenerated(this.FailedIndexes) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1563,6 +2009,61 @@ func (this *JobTemplateSpec) String() string { }, "") return s } +func (this *PodFailurePolicy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PodFailurePolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PodFailurePolicyRule", "PodFailurePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&PodFailurePolicy{`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyOnExitCodesRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodFailurePolicyOnExitCodesRequirement{`, + `ContainerName:` + valueToStringGenerated(this.ContainerName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyOnPodConditionsPattern) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodFailurePolicyOnPodConditionsPattern{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForOnPodConditions := "[]PodFailurePolicyOnPodConditionsPattern{" + for _, f := range this.OnPodConditions { 
+ repeatedStringForOnPodConditions += strings.Replace(strings.Replace(f.String(), "PodFailurePolicyOnPodConditionsPattern", "PodFailurePolicyOnPodConditionsPattern", 1), `&`, ``, 1) + "," + } + repeatedStringForOnPodConditions += "}" + s := strings.Join([]string{`&PodFailurePolicyRule{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `OnExitCodes:` + strings.Replace(this.OnExitCodes.String(), "PodFailurePolicyOnExitCodesRequirement", "PodFailurePolicyOnExitCodesRequirement", 1) + `,`, + `OnPodConditions:` + repeatedStringForOnPodConditions + `,`, + `}`, + }, "") + return s +} func (this *UncountedTerminatedPods) String() string { if this == nil { return "nil" @@ -3048,6 +3549,115 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Suspend = &b + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFailurePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFailurePolicy == nil { + m.PodFailurePolicy = &PodFailurePolicy{} + } + if err := m.PodFailurePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimitPerIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BackoffLimitPerIndex = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailedIndexes", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxFailedIndexes = &v + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodReplacementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PodReplacementPolicy(dAtA[iNdEx:postIndex]) + m.PodReplacementPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3349,6 +3959,59 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } } m.Ready = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedIndexes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FailedIndexes = &s + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminating = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3486,6 +4149,547 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodFailurePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PodFailurePolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodFailurePolicyOnExitCodesRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicyOnExitCodesRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicyOnExitCodesRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ContainerName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = PodFailurePolicyOnExitCodesOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Values) == 0 { + m.Values = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodFailurePolicyOnPodConditionsPattern) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicyOnPodConditionsPattern: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicyOnPodConditionsPattern: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = k8s_io_api_core_v1.PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodFailurePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = 
PodFailurePolicyAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnExitCodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OnExitCodes == nil { + m.OnExitCodes = &PodFailurePolicyOnExitCodesRequirement{} + } + if err := m.OnExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnPodConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnPodConditions = append(m.OnPodConditions, PodFailurePolicyOnPodConditionsPattern{}) + if err := m.OnPodConditions[len(m.OnPodConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *UncountedTerminatedPods) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index a57fef0b2..4f0822440 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -63,9 +63,15 @@ message CronJobSpec { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. optional string schedule = 1; - // The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - // If not specified, this will rely on the time zone of the kube-controller-manager process. - // ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate. + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. 
+ // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones // +optional optional string timeZone = 8; @@ -76,6 +82,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -182,7 +189,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -198,11 +205,48 @@ message JobSpec { // +optional optional int64 activeDeadlineSeconds = 3; + // Specifies the policy of handling failed pods. In particular, it allows to + // specify the set of actions and conditions which need to be + // satisfied to take the associated action. + // If empty, the default behaviour applies - the counter of failed pods, + // represented by the jobs's .status.failed field, is incremented and it is + // checked against the backoffLimit. This field cannot be used in combination + // with restartPolicy=OnFailure. + // + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). + // +optional + optional PodFailurePolicy podFailurePolicy = 11; + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional optional int32 backoffLimit = 7; + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + optional int32 backoffLimitPerIndex = 12; + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + optional int32 maxFailedIndexes = 13; + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors @@ -223,6 +267,7 @@ message JobSpec { optional bool manualSelector = 5; // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ optional k8s.io.api.core.v1.PodTemplateSpec template = 6; @@ -236,7 +281,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -261,7 +306,7 @@ message JobSpec { // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -271,6 +316,19 @@ message JobSpec { // // +optional optional bool suspend = 10; + + // podReplacementPolicy specifies when to create replacement Pods. + // Possible values are: + // - TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + // - Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + // + // When using podFailurePolicy, Failed is the the only allowed value. + // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. + // +optional + optional string podReplacementPolicy = 14; } // JobStatus represents the current state of a Job. @@ -314,7 +372,15 @@ message JobStatus { // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // The number of pods which are terminating (in phase Pending or Running + // and have a deletionTimestamp). + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (disabled by default). + // +optional + optional int32 terminating = 11; + + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -324,20 +390,31 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. 
+  // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
+  // represented as "1,3-5,7".
+  // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+  // feature gate is enabled (disabled by default).
+  // +optional
+  optional string failedIndexes = 10;
+
+  // uncountedTerminatedPods holds the UIDs of Pods that have terminated but
   // the job controller hasn't yet accounted for in the status counters.
   //
   // The job controller creates pods with a finalizer. When a pod terminates
   // (succeeded or failed), the controller does three steps to account for it
   // in the job status:
-  // (1) Add the pod UID to the arrays in this field.
-  // (2) Remove the pod finalizer.
-  // (3) Remove the pod UID from the arrays while increasing the corresponding
+  //
+  // 1. Add the pod UID to the arrays in this field.
+  // 2. Remove the pod finalizer.
+  // 3. Remove the pod UID from the arrays while increasing the corresponding
   //     counter.
   //
-  // This field is beta-level. The job controller only makes use of this field
-  // when the feature gate JobTrackingWithFinalizers is enabled (enabled
-  // by default).
   // Old jobs might not be tracked using this field, in which case the field
   // remains null.
   // +optional
@@ -364,15 +441,108 @@ message JobTemplateSpec {
   optional JobSpec spec = 2;
 }
 
+// PodFailurePolicy describes how failed pods influence the backoffLimit.
+message PodFailurePolicy {
+  // A list of pod failure policy rules. The rules are evaluated in order.
+  // Once a rule matches a Pod failure, the remaining rules are ignored.
+  // When no rule matches the Pod failure, the default handling applies - the
+  // counter of pod failures is incremented and it is checked against
+  // the backoffLimit. At most 20 elements are allowed.
+  // +listType=atomic
+  repeated PodFailurePolicyRule rules = 1;
+}
+
+// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
+// a failed pod based on its container exit codes. In particular, it looks up the
+// .state.terminated.exitCode for each app container and init container status,
+// represented by the .status.containerStatuses and .status.initContainerStatuses
+// fields in the Pod status, respectively. Containers completed with success
+// (exit code 0) are excluded from the requirement check.
+message PodFailurePolicyOnExitCodesRequirement {
+  // Restricts the check for exit codes to the container with the
+  // specified name. When null, the rule applies to all containers.
+  // When specified, it should match one of the container or initContainer
+  // names in the pod template.
+  // +optional
+  optional string containerName = 1;
+
+  // Represents the relationship between the container exit code(s) and the
+  // specified values. Containers completed with success (exit code 0) are
+  // excluded from the requirement check. Possible values are:
+  //
+  // - In: the requirement is satisfied if at least one container exit code
+  //   (might be multiple if there are multiple containers not restricted
+  //   by the 'containerName' field) is in the set of specified values.
+  // - NotIn: the requirement is satisfied if at least one container exit code
+  //   (might be multiple if there are multiple containers not restricted
+  //   by the 'containerName' field) is not in the set of specified values.
+  // Additional values are considered to be added in the future. Clients should
+  // react to an unknown operator by assuming the requirement is not satisfied.
+  optional string operator = 2;
+
+  // Specifies the set of values.
Each returned container exit code (might be + // multiple in case of multiple containers) is checked against this set of + // values with respect to the operator. The list of values must be ordered + // and must not contain duplicates. Value '0' cannot be used for the In operator. + // At least one element is required. At most 255 elements are allowed. + // +listType=set + repeated int32 values = 3; +} + +// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +// an actual pod condition type. +message PodFailurePolicyOnPodConditionsPattern { + // Specifies the required Pod condition type. To match a pod condition + // it is required that specified type equals the pod condition type. + optional string type = 1; + + // Specifies the required Pod condition status. To match a pod condition + // it is required that the specified status equals the pod condition status. + // Defaults to True. + optional string status = 2; +} + +// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. +message PodFailurePolicyRule { + // Specifies the action taken on a pod failure when the requirements are satisfied. + // Possible values are: + // + // - FailJob: indicates that the pod's job is marked as Failed and all + // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // - Ignore: indicates that the counter towards the .backoffLimit is not + // incremented and a replacement pod is created. + // - Count: indicates that the pod is handled in the default way - the + // counter towards the .backoffLimit is incremented. + // Additional values are considered to be added in the future. Clients should + // react to an unknown action by skipping the rule. + optional string action = 1; + + // Represents the requirement on the container exit codes. + // +optional + optional PodFailurePolicyOnExitCodesRequirement onExitCodes = 2; + + // Represents the requirement on the pod conditions. The requirement is represented + // as a list of pod condition patterns. The requirement is satisfied if at + // least one pattern matches an actual pod condition. At most 20 elements are allowed. + // +listType=atomic + // +optional + repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3; +} + // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. 
   // +listType=set
   // +optional
   repeated string failed = 2;
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index 110f974b7..8a28614c0 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -23,15 +23,40 @@ import (
 )
 
 const (
-	JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index"
+	// All Kubernetes labels need to be prefixed with Kubernetes to distinguish them from end-user labels
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions
+	labelPrefix = "batch.kubernetes.io/"
+
+	// CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job.
+	// It records the original/expected scheduled timestamp for the running job, represented in RFC3339.
+	// The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled.
+	CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp"
+
+	JobCompletionIndexAnnotation = labelPrefix + "job-completion-index"
 	// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
 	// being deleted before being accounted in the Job status.
-	// The apiserver and job controller use this string as a Job annotation, to
-	// mark Jobs that are being tracked using pod finalizers. Two releases after
-	// the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will
-	// no longer be used as a Job annotation.
-	JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
+	//
+	// Additionally, the apiserver and job controller use this string as a Job
+	// annotation, to mark Jobs that are being tracked using pod finalizers.
+	// However, this behavior is deprecated in kubernetes 1.26. This means that, in
+	// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
+	// apiserver and job controller will ignore this annotation and they will
+	// always track jobs using finalizers.
+	JobTrackingFinalizer = labelPrefix + "job-tracking"
+
+	// The Job labels will use batch.kubernetes.io as a prefix for all labels
+	// Historically the job controller uses unprefixed labels for job-name and controller-uid and
+	// Kubernetes continues to recognize those unprefixed labels for consistency.
+	JobNameLabel = labelPrefix + "job-name"
+	// ControllerUid is used to programmatically get pods corresponding to a Job.
+	// There is a corresponding label without the batch.kubernetes.io prefix that we support for legacy reasons.
+	ControllerUidLabel = labelPrefix + "controller-uid"
+	// Annotation indicating the number of failures for the index corresponding
+	// to the pod, which are counted towards the backoff limit.
+	JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count"
+	// Annotation indicating the number of failures for the index corresponding
+	// to the pod, which don't count towards the backoff limit, according to the
+	// pod failure policy. When the annotation is absent zero is implied.
+	JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count"
 )
 
 // +genclient
@@ -87,6 +112,145 @@ const (
 	IndexedCompletion CompletionMode = "Indexed"
 )
 
+// PodFailurePolicyAction specifies how a Pod failure is handled.
+// +enum
+type PodFailurePolicyAction string
+
+const (
+	// This is an action which might be taken on a pod failure - mark the
+	// pod's job as Failed and terminate all running pods.
+ PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob" + + // This is an action which might be taken on a pod failure - mark the + // Job's index as failed to avoid restarts within this index. This action + // can only be used when backoffLimitPerIndex is set. + PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" + + // This is an action which might be taken on a pod failure - the counter towards + // .backoffLimit, represented by the job's .status.failed field, is not + // incremented and a replacement pod is created. + PodFailurePolicyActionIgnore PodFailurePolicyAction = "Ignore" + + // This is an action which might be taken on a pod failure - the pod failure + // is handled in the default way - the counter towards .backoffLimit, + // represented by the job's .status.failed field, is incremented. + PodFailurePolicyActionCount PodFailurePolicyAction = "Count" +) + +// +enum +type PodFailurePolicyOnExitCodesOperator string + +const ( + PodFailurePolicyOnExitCodesOpIn PodFailurePolicyOnExitCodesOperator = "In" + PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn" +) + +// PodReplacementPolicy specifies the policy for creating pod replacements. +// +enum +type PodReplacementPolicy string + +const ( + // TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed" + // Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + Failed PodReplacementPolicy = "Failed" +) + +// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling +// a failed pod based on its container exit codes. In particular, it lookups the +// .state.terminated.exitCode for each app container and init container status, +// represented by the .status.containerStatuses and .status.initContainerStatuses +// fields in the Pod status, respectively. Containers completed with success +// (exit code 0) are excluded from the requirement check. +type PodFailurePolicyOnExitCodesRequirement struct { + // Restricts the check for exit codes to the container with the + // specified name. When null, the rule applies to all containers. + // When specified, it should match one the container or initContainer + // names in the pod template. + // +optional + ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + + // Represents the relationship between the container exit code(s) and the + // specified values. Containers completed with success (exit code 0) are + // excluded from the requirement check. Possible values are: + // + // - In: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is in the set of specified values. + // - NotIn: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is not in the set of specified values. + // Additional values are considered to be added in the future. Clients should + // react to an unknown operator by assuming the requirement is not satisfied. + Operator PodFailurePolicyOnExitCodesOperator `json:"operator" protobuf:"bytes,2,req,name=operator"` + + // Specifies the set of values. 
Each returned container exit code (might be + // multiple in case of multiple containers) is checked against this set of + // values with respect to the operator. The list of values must be ordered + // and must not contain duplicates. Value '0' cannot be used for the In operator. + // At least one element is required. At most 255 elements are allowed. + // +listType=set + Values []int32 `json:"values" protobuf:"varint,3,rep,name=values"` +} + +// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +// an actual pod condition type. +type PodFailurePolicyOnPodConditionsPattern struct { + // Specifies the required Pod condition type. To match a pod condition + // it is required that specified type equals the pod condition type. + Type corev1.PodConditionType `json:"type" protobuf:"bytes,1,req,name=type"` + + // Specifies the required Pod condition status. To match a pod condition + // it is required that the specified status equals the pod condition status. + // Defaults to True. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,req,name=status"` +} + +// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. +type PodFailurePolicyRule struct { + // Specifies the action taken on a pod failure when the requirements are satisfied. + // Possible values are: + // + // - FailJob: indicates that the pod's job is marked as Failed and all + // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // - Ignore: indicates that the counter towards the .backoffLimit is not + // incremented and a replacement pod is created. + // - Count: indicates that the pod is handled in the default way - the + // counter towards the .backoffLimit is incremented. + // Additional values are considered to be added in the future. Clients should + // react to an unknown action by skipping the rule. + Action PodFailurePolicyAction `json:"action" protobuf:"bytes,1,req,name=action"` + + // Represents the requirement on the container exit codes. + // +optional + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"` + + // Represents the requirement on the pod conditions. The requirement is represented + // as a list of pod condition patterns. The requirement is satisfied if at + // least one pattern matches an actual pod condition. At most 20 elements are allowed. + // +listType=atomic + // +optional + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` +} + +// PodFailurePolicy describes how failed pods influence the backoffLimit. +type PodFailurePolicy struct { + // A list of pod failure policy rules. The rules are evaluated in order. + // Once a rule matches a Pod failure, the remaining of the rules are ignored. + // When no rule matches the Pod failure, the default handling applies - the + // counter of pod failures is incremented and it is checked against + // the backoffLimit. At most 20 elements are allowed. + // +listType=atomic + Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + // JobSpec describes how the job execution will look like. 
type JobSpec struct { @@ -99,7 +263,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -115,11 +279,48 @@ type JobSpec struct { // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + // Specifies the policy of handling failed pods. In particular, it allows to + // specify the set of actions and conditions which need to be + // satisfied to take the associated action. + // If empty, the default behaviour applies - the counter of failed pods, + // represented by the jobs's .status.failed field, is incremented and it is + // checked against the backoffLimit. This field cannot be used in combination + // with restartPolicy=OnFailure. + // + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). + // +optional + PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed // Optional number of failed pods to retain. // +optional @@ -145,6 +346,7 @@ type JobSpec struct { ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Describes the pod that will be created when executing a job. 
+ // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` @@ -158,7 +360,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -183,7 +385,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -193,6 +395,19 @@ type JobSpec struct { // // +optional Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"` + + // podReplacementPolicy specifies when to create replacement Pods. + // Possible values are: + // - TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + // - Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + // + // When using podFailurePolicy, Failed is the the only allowed value. + // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. + // +optional + PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` } // JobStatus represents the current state of a Job. @@ -236,7 +451,15 @@ type JobStatus struct { // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // The number of pods which are terminating (in phase Pending or Running + // and have a deletionTimestamp). + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (disabled by default). + // +optional + Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"` + + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -246,20 +469,31 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. 
+ // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` + + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // - // This field is beta-level. The job controller only makes use of this field - // when the feature gate JobTrackingWithFinalizers is enabled (enabled - // by default). // Old jobs might not be tracked using this field, in which case the field // remains null. // +optional @@ -276,12 +510,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -297,6 +531,8 @@ const ( JobComplete JobConditionType = "Complete" // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" + // FailureTarget means the job is about to fail its execution. + JobFailureTarget JobConditionType = "FailureTarget" ) // JobCondition describes current state of a job. @@ -375,9 +611,15 @@ type CronJobSpec struct { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` - // The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - // If not specified, this will rely on the time zone of the kube-controller-manager process. - // ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate. + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. 
+ // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -388,6 +630,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index df24723a8..43b4e1e7d 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will rely on the time zone of the kube-controller-manager process. ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. 
Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", @@ -113,15 +113,19 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. 
When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. 
When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. 
Enable JobPodReplacementPolicy to be able to use this field.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -136,8 +140,10 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.", + "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. 
Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -155,10 +161,51 @@ func (JobTemplateSpec) SwaggerDoc() map[string]string { return map_JobTemplateSpec } +var map_PodFailurePolicy = map[string]string{ + "": "PodFailurePolicy describes how failed pods influence the backoffLimit.", + "rules": "A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed.", +} + +func (PodFailurePolicy) SwaggerDoc() map[string]string { + return map_PodFailurePolicy +} + +var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ + "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", + "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "values": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", +} + +func (PodFailurePolicyOnExitCodesRequirement) SwaggerDoc() map[string]string { + return map_PodFailurePolicyOnExitCodesRequirement +} + +var map_PodFailurePolicyOnPodConditionsPattern = map[string]string{ + "": "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type.", + "type": "Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type.", + "status": "Specifies the required Pod condition status. 
To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True.", +} + +func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { + return map_PodFailurePolicyOnPodConditionsPattern +} + +var map_PodFailurePolicyRule = map[string]string{ + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "onExitCodes": "Represents the requirement on the container exit codes.", + "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", +} + +func (PodFailurePolicyRule) SwaggerDoc() map[string]string { + return map_PodFailurePolicyRule +} + var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 4bc195a9f..43fc41515 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -257,11 +257,26 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(int64) **out = **in } + if in.PodFailurePolicy != nil { + in, out := &in.PodFailurePolicy, &out.PodFailurePolicy + *out = new(PodFailurePolicy) + (*in).DeepCopyInto(*out) + } if in.BackoffLimit != nil { in, out := &in.BackoffLimit, &out.BackoffLimit *out = new(int32) **out = **in } + if in.BackoffLimitPerIndex != nil { + in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex + *out = new(int32) + **out = **in + } + if in.MaxFailedIndexes != nil { + in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes + *out = new(int32) + **out = **in + } if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = new(metav1.LabelSelector) @@ -288,6 +303,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(bool) **out = **in } + if in.PodReplacementPolicy != nil { + in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy + *out = new(PodReplacementPolicy) + **out = **in + } return } @@ -319,6 +339,16 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { in, out := 
&in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(int32) + **out = **in + } + if in.FailedIndexes != nil { + in, out := &in.FailedIndexes, &out.FailedIndexes + *out = new(string) + **out = **in + } if in.UncountedTerminatedPods != nil { in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods *out = new(UncountedTerminatedPods) @@ -360,6 +390,97 @@ func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicy) DeepCopyInto(out *PodFailurePolicy) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PodFailurePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicy. +func (in *PodFailurePolicy) DeepCopy() *PodFailurePolicy { + if in == nil { + return nil + } + out := new(PodFailurePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopyInto(out *PodFailurePolicyOnExitCodesRequirement) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnExitCodesRequirement. +func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopy() *PodFailurePolicyOnExitCodesRequirement { + if in == nil { + return nil + } + out := new(PodFailurePolicyOnExitCodesRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopyInto(out *PodFailurePolicyOnPodConditionsPattern) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnPodConditionsPattern. +func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopy() *PodFailurePolicyOnPodConditionsPattern { + if in == nil { + return nil + } + out := new(PodFailurePolicyOnPodConditionsPattern) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyRule) DeepCopyInto(out *PodFailurePolicyRule) { + *out = *in + if in.OnExitCodes != nil { + in, out := &in.OnExitCodes, &out.OnExitCodes + *out = new(PodFailurePolicyOnExitCodesRequirement) + (*in).DeepCopyInto(*out) + } + if in.OnPodConditions != nil { + in, out := &in.OnPodConditions, &out.OnPodConditions + *out = make([]PodFailurePolicyOnPodConditionsPattern, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyRule. 
+func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule { + if in == nil { + return nil + } + out := new(PodFailurePolicyRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index d042fc695..03feb2cea 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -157,38 +157,10 @@ func (m *CronJobStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} + return fileDescriptor_e57b277b05179ae7, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +190,6 @@ func init() { proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } @@ -227,58 +198,57 @@ func init() { } var fileDescriptor_e57b277b05179ae7 = []byte{ - // 814 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x4e, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, - 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x22, - 0xa4, 0xaa, 0x42, 0x9d, 0x1d, 0xbf, 0x38, 0xd3, 0x78, 0x77, 0x56, 0x3b, 0xb3, 0x91, 0x72, 0xe3, - 0xc2, 0x9d, 0xef, 0xc2, 0x9d, 0x73, 0x8e, 0xbd, 0xd1, 0xd3, 0x8a, 0x2c, 0xdf, 0x82, 0x13, 0x9a, - 0xf1, 0x7a, 0xed, 0xda, 0xeb, 0xa6, 0xbd, 0xf4, 0xe6, 0x79, 0xf3, 0xff, 0xff, 0xe6, 0xed, 0x7b, - 0x6f, 0x67, 0x8d, 0xee, 0x9d, 0x7e, 0xad, 0x88, 0x90, 0xfe, 0x69, 0x16, 0x42, 0x1a, 0x83, 0x06, - 0xe5, 0x9f, 0x41, 0x3c, 0x92, 0xa9, 0x5f, 0x6e, 0xb0, 0x44, 0xf8, 0x21, 0xd3, 0xfc, 0xc4, 0x3f, - 0xdb, 0x0f, 0x41, 0xb3, 0x7d, 0x7f, 0x0c, 0x31, 0xa4, 0x4c, 0xc3, 0x88, 0x24, 0xa9, 0xd4, 0x12, - 0xbb, 0x53, 0x25, 0x61, 0x89, 0x20, 0x56, 0x49, 0x4a, 0x65, 0xf7, 0xf3, 0xb1, 0xd0, 0x27, 0x59, - 0x48, 0xb8, 0x8c, 0xfc, 0xb1, 0x1c, 0x4b, 0xdf, 0x1a, 0xc2, 0xec, 0xd8, 0xae, 
0xec, 0xc2, 0xfe, - 0x9a, 0x82, 0xba, 0xb7, 0x6b, 0x8e, 0x5c, 0x3e, 0xad, 0xdb, 0x5f, 0x10, 0x71, 0x99, 0x42, 0x9d, - 0xe6, 0xcb, 0xb9, 0x26, 0x62, 0xfc, 0x44, 0xc4, 0x90, 0x9e, 0xfb, 0xc9, 0xe9, 0xd8, 0x04, 0x94, - 0x1f, 0x81, 0x66, 0x75, 0x2e, 0x7f, 0x9d, 0x2b, 0xcd, 0x62, 0x2d, 0x22, 0x58, 0x31, 0x7c, 0x75, - 0x95, 0x41, 0xf1, 0x13, 0x88, 0xd8, 0xb2, 0xaf, 0xff, 0x7b, 0x13, 0x6d, 0xdf, 0x4f, 0x65, 0x7c, - 0x28, 0x43, 0xfc, 0x14, 0xb5, 0x4d, 0x3e, 0x23, 0xa6, 0x99, 0xeb, 0xec, 0x39, 0x83, 0xce, 0xc1, - 0x17, 0x64, 0x5e, 0xcf, 0x0a, 0x4b, 0x92, 0xd3, 0xb1, 0x09, 0x28, 0x62, 0xd4, 0xe4, 0x6c, 0x9f, - 0x3c, 0x0c, 0x9f, 0x01, 0xd7, 0x3f, 0x82, 0x66, 0x01, 0xbe, 0xc8, 0x7b, 0x8d, 0x22, 0xef, 0xa1, - 0x79, 0x8c, 0x56, 0x54, 0xfc, 0x2d, 0xda, 0x54, 0x09, 0x70, 0xb7, 0x69, 0xe9, 0x77, 0xc8, 0xba, - 0x6e, 0x91, 0x32, 0xa5, 0x61, 0x02, 0x3c, 0xb8, 0x5e, 0x22, 0x37, 0xcd, 0x8a, 0x5a, 0x00, 0x7e, - 0x88, 0xb6, 0x94, 0x66, 0x3a, 0x53, 0xee, 0x86, 0x45, 0x7d, 0x7c, 0x35, 0xca, 0xca, 0x83, 0x77, - 0x4a, 0xd8, 0xd6, 0x74, 0x4d, 0x4b, 0x4c, 0xff, 0x4f, 0x07, 0x75, 0x4a, 0xe5, 0x91, 0x50, 0x1a, - 0x3f, 0x59, 0xa9, 0x05, 0x79, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x4e, 0x79, 0x52, 0x7b, 0x16, - 0x59, 0xa8, 0xc3, 0x03, 0xd4, 0x12, 0x1a, 0x22, 0xe5, 0x36, 0xf7, 0x36, 0x06, 0x9d, 0x83, 0x8f, - 0xae, 0xcc, 0x3e, 0xb8, 0x51, 0xd2, 0x5a, 0xdf, 0x1b, 0x1f, 0x9d, 0xda, 0xfb, 0x7f, 0x6f, 0x56, - 0x59, 0x9b, 0xe2, 0xe0, 0xcf, 0x50, 0xdb, 0xf4, 0x79, 0x94, 0x4d, 0xc0, 0x66, 0x7d, 0x6d, 0x9e, - 0xc5, 0xb0, 0x8c, 0xd3, 0x4a, 0x81, 0x07, 0xa8, 0x6d, 0x46, 0xe3, 0xb1, 0x8c, 0xc1, 0x6d, 0x5b, - 0xf5, 0x75, 0xa3, 0x7c, 0x54, 0xc6, 0x68, 0xb5, 0x8b, 0x7f, 0x46, 0xb7, 0x94, 0x66, 0xa9, 0x16, - 0xf1, 0xf8, 0x1b, 0x60, 0xa3, 0x89, 0x88, 0x61, 0x08, 0x5c, 0xc6, 0x23, 0x65, 0x5b, 0xb9, 0x11, - 0x7c, 0x50, 0xe4, 0xbd, 0x5b, 0xc3, 0x7a, 0x09, 0x5d, 0xe7, 0xc5, 0x4f, 0xd0, 0x4d, 0x2e, 0x63, - 0x9e, 0xa5, 0x29, 0xc4, 0xfc, 0xfc, 0x27, 0x39, 0x11, 0xfc, 0xdc, 0x36, 0xf4, 0x5a, 0x40, 0xca, - 0xbc, 0x6f, 0xde, 0x5f, 0x16, 0xfc, 0x57, 0x17, 0xa4, 0xab, 0x20, 0x7c, 0x07, 0x6d, 0xab, 0x4c, - 0x25, 0x10, 0x8f, 0xdc, 0xcd, 0x3d, 0x67, 0xd0, 0x0e, 0x3a, 0x45, 0xde, 0xdb, 0x1e, 0x4e, 0x43, - 0x74, 0xb6, 0x87, 0x9f, 0xa2, 0xce, 0x33, 0x19, 0x3e, 0x82, 0x28, 0x99, 0x30, 0x0d, 0x6e, 0xcb, - 0x36, 0xfb, 0x93, 0xf5, 0x1d, 0x39, 0x9c, 0x8b, 0xed, 0x78, 0xbe, 0x57, 0x66, 0xda, 0x59, 0xd8, - 0xa0, 0x8b, 0x48, 0xfc, 0x2b, 0xea, 0xaa, 0x8c, 0x73, 0x50, 0xea, 0x38, 0x9b, 0x1c, 0xca, 0x50, - 0x7d, 0x27, 0x94, 0x96, 0xe9, 0xf9, 0x91, 0x88, 0x84, 0x76, 0xb7, 0xf6, 0x9c, 0x41, 0x2b, 0xf0, - 0x8a, 0xbc, 0xd7, 0x1d, 0xae, 0x55, 0xd1, 0x57, 0x10, 0x30, 0x45, 0xbb, 0xc7, 0x4c, 0x4c, 0x60, - 0xb4, 0xc2, 0xde, 0xb6, 0xec, 0x6e, 0x91, 0xf7, 0x76, 0x1f, 0xd4, 0x2a, 0xe8, 0x1a, 0x67, 0xff, - 0xaf, 0x26, 0xba, 0xf1, 0xd2, 0x9b, 0x83, 0x7f, 0x40, 0x5b, 0x8c, 0x6b, 0x71, 0x66, 0x26, 0xcb, - 0x0c, 0xed, 0xed, 0xc5, 0x12, 0x99, 0xdb, 0x6f, 0x7e, 0x13, 0x50, 0x38, 0x06, 0xd3, 0x09, 0x98, - 0xbf, 0x6e, 0xf7, 0xac, 0x95, 0x96, 0x08, 0x3c, 0x41, 0x3b, 0x13, 0xa6, 0xf4, 0x6c, 0x28, 0xcd, - 0xc8, 0xd9, 0x26, 0x75, 0x0e, 0x3e, 0x7d, 0xbd, 0xd7, 0xcc, 0x38, 0x82, 0xf7, 0x8b, 0xbc, 0xb7, - 0x73, 0xb4, 0xc4, 0xa1, 0x2b, 0x64, 0x9c, 0x22, 0x6c, 0x63, 0x55, 0x09, 0xed, 0x79, 0xad, 0x37, - 0x3e, 0x6f, 0xb7, 0xc8, 0x7b, 0xf8, 0x68, 0x85, 0x44, 0x6b, 0xe8, 0xfd, 0x0b, 0x07, 0x2d, 0x4e, - 0xc4, 0x5b, 0xb8, 0x5c, 0x7f, 0x41, 0x6d, 0x3d, 0x9b, 0xe2, 0xe6, 0x9b, 0x4e, 0x71, 0x75, 0x4f, - 0x54, 0x23, 0x5c, 0xc1, 0xcc, 0xdd, 0xf8, 0xee, 0x92, 0xfe, 0x2d, 0x3c, 0xce, 0xdd, 0x97, 0xbe, - 0x15, 
0x1f, 0xd6, 0x3d, 0x0a, 0x79, 0xc5, 0x27, 0x22, 0xb8, 0x7b, 0x71, 0xe9, 0x35, 0x9e, 0x5f, - 0x7a, 0x8d, 0x17, 0x97, 0x5e, 0xe3, 0xb7, 0xc2, 0x73, 0x2e, 0x0a, 0xcf, 0x79, 0x5e, 0x78, 0xce, - 0x8b, 0xc2, 0x73, 0xfe, 0x29, 0x3c, 0xe7, 0x8f, 0x7f, 0xbd, 0xc6, 0x63, 0x77, 0xdd, 0x5f, 0x8b, - 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xf2, 0x8b, 0xe9, 0x8e, 0x08, 0x00, 0x00, + // 787 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0xbd, 0x49, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, + 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x72, + 0xa9, 0x2a, 0xd4, 0xd9, 0xd9, 0x17, 0x67, 0x9a, 0xdd, 0x9d, 0xd5, 0xce, 0x6c, 0xa4, 0xdc, 0xb8, + 0x70, 0xe7, 0xbb, 0x70, 0xe7, 0x9c, 0x63, 0x6f, 0xf4, 0xb4, 0x22, 0xcb, 0xb7, 0xe0, 0x84, 0x66, + 0xbc, 0xb1, 0x5d, 0x7b, 0xdd, 0x84, 0x4b, 0x6f, 0x9e, 0x37, 0xff, 0xff, 0x6f, 0x9e, 0xde, 0x7b, + 0xfb, 0x8c, 0x1e, 0x9c, 0x7c, 0xad, 0x88, 0x90, 0xfe, 0x49, 0x11, 0x42, 0x9e, 0x82, 0x06, 0xe5, + 0x9f, 0x42, 0x1a, 0xc9, 0xdc, 0xaf, 0x2f, 0x58, 0x26, 0xfc, 0x90, 0x69, 0x7e, 0xec, 0x9f, 0xee, + 0x85, 0xa0, 0xd9, 0x9e, 0x3f, 0x86, 0x14, 0x72, 0xa6, 0x21, 0x22, 0x59, 0x2e, 0xb5, 0xc4, 0xee, + 0x44, 0x49, 0x58, 0x26, 0x88, 0x55, 0x92, 0x5a, 0xd9, 0xfb, 0x7c, 0x2c, 0xf4, 0x71, 0x11, 0x12, + 0x2e, 0x13, 0x7f, 0x2c, 0xc7, 0xd2, 0xb7, 0x86, 0xb0, 0x38, 0xb2, 0x27, 0x7b, 0xb0, 0xbf, 0x26, + 0xa0, 0xde, 0xdd, 0x86, 0x27, 0x17, 0x5f, 0xeb, 0x0d, 0xe6, 0x44, 0x5c, 0xe6, 0xd0, 0xa4, 0xf9, + 0x72, 0xa6, 0x49, 0x18, 0x3f, 0x16, 0x29, 0xe4, 0x67, 0x7e, 0x76, 0x32, 0x36, 0x01, 0xe5, 0x27, + 0xa0, 0x59, 0x93, 0xcb, 0x5f, 0xe5, 0xca, 0x8b, 0x54, 0x8b, 0x04, 0x96, 0x0c, 0x5f, 0x5d, 0x65, + 0x50, 0xfc, 0x18, 0x12, 0xb6, 0xe8, 0x1b, 0xfc, 0xb6, 0x86, 0xb6, 0x1e, 0xe6, 0x32, 0x3d, 0x90, + 0x21, 0x7e, 0x8e, 0x3a, 0x26, 0x9f, 0x88, 0x69, 0xe6, 0x3a, 0xbb, 0xce, 0xb0, 0xbb, 0xff, 0x05, + 0x99, 0xd5, 0x73, 0x8a, 0x25, 0xd9, 0xc9, 0xd8, 0x04, 0x14, 0x31, 0x6a, 0x72, 0xba, 0x47, 0x1e, + 0x87, 0x2f, 0x80, 0xeb, 0x1f, 0x41, 0xb3, 0x00, 0x9f, 0x97, 0xfd, 0x56, 0x55, 0xf6, 0xd1, 0x2c, + 0x46, 0xa7, 0x54, 0xfc, 0x2d, 0xda, 0x50, 0x19, 0x70, 0x77, 0xcd, 0xd2, 0xef, 0x91, 0x55, 0xdd, + 0x22, 0x75, 0x4a, 0xa3, 0x0c, 0x78, 0x70, 0xb3, 0x46, 0x6e, 0x98, 0x13, 0xb5, 0x00, 0xfc, 0x18, + 0x6d, 0x2a, 0xcd, 0x74, 0xa1, 0xdc, 0x75, 0x8b, 0xfa, 0xf8, 0x6a, 0x94, 0x95, 0x07, 0xef, 0xd4, + 0xb0, 0xcd, 0xc9, 0x99, 0xd6, 0x98, 0xc1, 0x1f, 0x0e, 0xea, 0xd6, 0xca, 0x43, 0xa1, 0x34, 0x7e, + 0xb6, 0x54, 0x0b, 0x72, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x76, 0xfd, 0x52, 0xe7, 0x32, 0x32, + 0x57, 0x87, 0x47, 0xa8, 0x2d, 0x34, 0x24, 0xca, 0x5d, 0xdb, 0x5d, 0x1f, 0x76, 0xf7, 0x3f, 0xba, + 0x32, 0xfb, 0xe0, 0x56, 0x4d, 0x6b, 0x7f, 0x6f, 0x7c, 0x74, 0x62, 0x1f, 0xfc, 0xb5, 0x31, 0xcd, + 0xda, 0x14, 0x07, 0x7f, 0x86, 0x3a, 0xa6, 0xcf, 0x51, 0x11, 0x83, 0xcd, 0xfa, 0xc6, 0x2c, 0x8b, + 0x51, 0x1d, 0xa7, 0x53, 0x05, 0x1e, 0xa2, 0x8e, 0x19, 0x8d, 0xa7, 0x32, 0x05, 0xb7, 0x63, 0xd5, + 0x37, 0x8d, 0xf2, 0x49, 0x1d, 0xa3, 0xd3, 0x5b, 0xfc, 0x33, 0xba, 0xa3, 0x34, 0xcb, 0xb5, 0x48, + 0xc7, 0xdf, 0x00, 0x8b, 0x62, 0x91, 0xc2, 0x08, 0xb8, 0x4c, 0x23, 0x65, 0x5b, 0xb9, 0x1e, 0x7c, + 0x50, 0x95, 0xfd, 0x3b, 0xa3, 0x66, 0x09, 0x5d, 0xe5, 0xc5, 0xcf, 0xd0, 0x6d, 0x2e, 0x53, 0x5e, + 0xe4, 0x39, 0xa4, 0xfc, 0xec, 0x27, 0x19, 0x0b, 0x7e, 0x66, 0x1b, 0x7a, 0x23, 0x20, 0x75, 0xde, + 0xb7, 0x1f, 0x2e, 0x0a, 0xfe, 0x6d, 0x0a, 0xd2, 0x65, 0x10, 0xbe, 0x87, 0xb6, 0x54, 0xa1, 
0x32, + 0x48, 0x23, 0x77, 0x63, 0xd7, 0x19, 0x76, 0x82, 0x6e, 0x55, 0xf6, 0xb7, 0x46, 0x93, 0x10, 0xbd, + 0xbc, 0xc3, 0xcf, 0x51, 0xf7, 0x85, 0x0c, 0x9f, 0x40, 0x92, 0xc5, 0x4c, 0x83, 0xdb, 0xb6, 0xcd, + 0xfe, 0x64, 0x75, 0x47, 0x0e, 0x66, 0x62, 0x3b, 0x9e, 0xef, 0xd5, 0x99, 0x76, 0xe7, 0x2e, 0xe8, + 0x3c, 0x12, 0xff, 0x82, 0x7a, 0xaa, 0xe0, 0x1c, 0x94, 0x3a, 0x2a, 0xe2, 0x03, 0x19, 0xaa, 0xef, + 0x84, 0xd2, 0x32, 0x3f, 0x3b, 0x14, 0x89, 0xd0, 0xee, 0xe6, 0xae, 0x33, 0x6c, 0x07, 0x5e, 0x55, + 0xf6, 0x7b, 0xa3, 0x95, 0x2a, 0xfa, 0x06, 0x02, 0xa6, 0x68, 0xe7, 0x88, 0x89, 0x18, 0xa2, 0x25, + 0xf6, 0x96, 0x65, 0xf7, 0xaa, 0xb2, 0xbf, 0xf3, 0xa8, 0x51, 0x41, 0x57, 0x38, 0x07, 0x7f, 0xae, + 0xa1, 0x5b, 0xaf, 0x7d, 0x39, 0xf8, 0x07, 0xb4, 0xc9, 0xb8, 0x16, 0xa7, 0x66, 0xb2, 0xcc, 0xd0, + 0xde, 0x9d, 0x2f, 0x91, 0xd9, 0x7e, 0xb3, 0x4d, 0x40, 0xe1, 0x08, 0x4c, 0x27, 0x60, 0xf6, 0xb9, + 0x3d, 0xb0, 0x56, 0x5a, 0x23, 0x70, 0x8c, 0xb6, 0x63, 0xa6, 0xf4, 0xe5, 0x50, 0x9a, 0x91, 0xb3, + 0x4d, 0xea, 0xee, 0x7f, 0x7a, 0xbd, 0xcf, 0xcc, 0x38, 0x82, 0xf7, 0xab, 0xb2, 0xbf, 0x7d, 0xb8, + 0xc0, 0xa1, 0x4b, 0x64, 0x9c, 0x23, 0x6c, 0x63, 0xd3, 0x12, 0xda, 0xf7, 0xda, 0xff, 0xfb, 0xbd, + 0x9d, 0xaa, 0xec, 0xe3, 0xc3, 0x25, 0x12, 0x6d, 0xa0, 0x9b, 0x85, 0xf2, 0xee, 0xc2, 0xa8, 0xbc, + 0x85, 0x05, 0x7b, 0xff, 0xb5, 0x05, 0xfb, 0x61, 0xd3, 0x14, 0x93, 0x37, 0xec, 0xd5, 0xe0, 0xfe, + 0xf9, 0x85, 0xd7, 0x7a, 0x79, 0xe1, 0xb5, 0x5e, 0x5d, 0x78, 0xad, 0x5f, 0x2b, 0xcf, 0x39, 0xaf, + 0x3c, 0xe7, 0x65, 0xe5, 0x39, 0xaf, 0x2a, 0xcf, 0xf9, 0xbb, 0xf2, 0x9c, 0xdf, 0xff, 0xf1, 0x5a, + 0x4f, 0xdd, 0x55, 0xff, 0xc7, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x61, 0x72, 0xc3, 0xe0, 0xc3, + 0x07, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -517,49 +487,6 @@ func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -700,19 +627,6 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *JobTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -794,17 +708,6 @@ func (this *CronJobStatus) String() string { }, "") return s } -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", 
"v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *JobTemplateSpec) String() string { if this == nil { return "nil" @@ -1507,122 +1410,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index c73b7a654..ac774f19a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -64,9 +64,15 @@ message CronJobSpec { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. optional string schedule = 1; - // The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - // If not specified, this will rely on the time zone of the kube-controller-manager process. - // ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate. 
+ // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones // +optional optional string timeZone = 8; @@ -77,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -120,19 +127,6 @@ message CronJobStatus { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 226de49f4..9382ca23f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -44,7 +44,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, &CronJob{}, &CronJobList{}, ) diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index 54a2d1431..976752a92 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -22,24 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.8 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. @@ -104,9 +86,15 @@ type CronJobSpec struct { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` - // The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - // If not specified, this will rely on the time zone of the kube-controller-manager process. - // ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate. + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -117,6 +105,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 871948076..3b3eafe8c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will rely on the time zone of the kube-controller-manager process. 
ALPHA: This field is in alpha and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", @@ -75,16 +75,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", "metadata": "Standard object's metadata of the jobs created from this template. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c3a3494c4..2c8570332 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -158,33 +158,6 @@ func (in *CronJobStatus) DeepCopy() *CronJobStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 2836b3b01..b57e9f1b8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,21 +72,3 @@ func (in *CronJobList) APILifecycleReplacement() schema.GroupVersionKind { func (in *CronJobList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *JobTemplate) APILifecycleIntroduced() (major, minor int) { - return 1, 8 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *JobTemplate) APILifecycleDeprecated() (major, minor int) { - return 1, 22 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *JobTemplate) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index 752c66973..92b2018e7 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -274,8 +274,11 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. 
-// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// See: +// +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// // +enum type KeyUsage string diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 0dc8a4c69..4bdf39ebb 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ diff --git a/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go new file mode 100644 index 000000000..d83d0e820 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=certificates.k8s.io + +package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go new file mode 100644 index 000000000..546ecbefb --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -0,0 +1,831 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{0} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{1} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{2} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_8915b0d419f9eda6) +} + +var fileDescriptor_8915b0d419f9eda6 = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x77, 0x6a, 0x0b, 0xed, 0x44, 0x41, 0x56, 0x90, 0x90, 0xc3, 0x34, 0xe4, 0xd4, 0x8b, + 0x33, 0x26, 0x54, 0xe9, 0x79, 0x05, 0xa1, 0xe0, 0x0f, 0xd8, 0x7a, 0xb1, 0x78, 0x70, 0x32, 0x79, + 0xdd, 0x8c, 0xc9, 0xee, 0x0e, 0x33, 0xb3, 0x01, 0x6f, 0x82, 0xff, 0x80, 0x7f, 0x56, 0x8e, 0xd5, + 0x53, 0x4f, 0xc5, 0xac, 0xff, 0x88, 0xcc, 0x64, 0x93, 0x5d, 0x5c, 0x25, 0xd2, 0xdb, 0xbe, 0x1f, + 0x9f, 0xef, 0x7b, 0xdf, 0xb7, 0x0c, 0x3e, 0x9f, 0x9d, 0x19, 0x2a, 0x73, 0x36, 0x2b, 0xc6, 0xa0, + 0x33, 0xb0, 0x60, 0xd8, 0x02, 0xb2, 0x49, 0xae, 0x59, 0x55, 0xe0, 0x4a, 0x32, 0x01, 0xda, 0xca, + 0x2b, 0x29, 0xb8, 0x2f, 0x0f, 0xf9, 0x5c, 0x4d, 0xf9, 0x90, 0x25, 0x90, 0x81, 0xe6, 0x16, 0x26, + 0x54, 0xe9, 0xdc, 0xe6, 0x61, 0x7f, 0x4d, 0x50, 0xae, 0x24, 0x6d, 0x12, 0x74, 0x43, 0xf4, 0x9e, + 0x24, 0xd2, 0x4e, 0x8b, 0x31, 0x15, 0x79, 0xca, 0x92, 0x3c, 0xc9, 0x99, 0x07, 0xc7, 0xc5, 0x95, + 0x8f, 0x7c, 0xe0, 0xbf, 0xd6, 0x82, 0xbd, 0xd3, 0x7a, 0x85, 0x94, 0x8b, 0xa9, 0xcc, 0x40, 0x7f, + 0x66, 0x6a, 0x96, 0xb8, 0x84, 0x61, 0x29, 0x58, 0xce, 0x16, 0xad, 0x35, 0x7a, 0xec, 0x5f, 0x94, + 0x2e, 0x32, 0x2b, 0x53, 0x68, 0x01, 0xcf, 0x77, 0x01, 0x46, 0x4c, 0x21, 0xe5, 0x7f, 0x72, 0x83, + 0x1f, 0x08, 0x87, 0x2f, 0xe6, 0x85, 0xb1, 0xa0, 0xdf, 0xe9, 0xc2, 0xd8, 0xa8, 0xc8, 0x26, 0x73, + 0x08, 0x3f, 0xe2, 0x43, 0xb7, 0xda, 0x84, 0x5b, 0xde, 0x45, 0x7d, 0x74, 0xd2, 0x19, 0x3d, 0xa5, + 0xf5, 0x65, 0xb6, 0x13, 0xa8, 0x9a, 0x25, 0x2e, 0x61, 0xa8, 0xeb, 0xa6, 0x8b, 0x21, 0x7d, 0x3b, + 0xfe, 0x04, 0xc2, 0xbe, 0x06, 0xcb, 0xa3, 0x70, 0x79, 0x7b, 0x1c, 0x94, 0xb7, 0xc7, 0xb8, 0xce, + 0xc5, 0x5b, 0xd5, 0xf0, 0x12, 0xef, 0x1b, 0x05, 0xa2, 0xbb, 0xe7, 0xd5, 0xcf, 0xe8, 0xae, 0xbb, + 0xd3, 0xf6, 0x96, 0x17, 0x0a, 0x44, 0x74, 0xbf, 0x9a, 0xb2, 0xef, 0xa2, 0xd8, 0x6b, 0x0e, 0xbe, + 0x23, 0xfc, 0xb8, 0xdd, 0xfe, 0x4a, 0x1a, 0x1b, 0x7e, 0x68, 0x19, 0xa3, 0xff, 0x67, 0xcc, 0xd1, + 0xde, 0xd6, 0xc3, 0x6a, 0xe0, 0xe1, 0x26, 0xd3, 0x30, 0xf5, 0x1e, 0x1f, 0x48, 0x0b, 0xa9, 0xe9, + 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0xa7, 0x77, 0x71, 0x15, 0x3d, 0xa8, 0x06, 0x1c, 0x9c, 0x3b, + 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0x57, 0x4f, 0xce, 0x74, 0x38, 0xc2, 0xd8, 0xc8, 0x24, 0x03, + 0xfd, 0x86, 0xa7, 0xe0, 0x5d, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0x56, 0xe2, 0x46, 0x57, 0xf8, 0x0c, + 0x77, 0x6c, 0x2d, 0xe3, 0xff, 0xc2, 0x51, 0xf4, 0xa8, 0x82, 0x3a, 0x8d, 0x09, 0x71, 0xb3, 0x2f, + 0x7a, 0xb9, 0x5c, 0x91, 0xe0, 0x7a, 0x45, 0x82, 0x9b, 0x15, 0x09, 0xbe, 0x94, 0x04, 0x2d, 0x4b, + 0x82, 0xae, 0x4b, 0x82, 0x6e, 0x4a, 0x82, 0x7e, 0x96, 0x04, 0x7d, 0xfb, 0x45, 0x82, 0xcb, 0xfe, + 0xae, 0x67, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x05, 0xe9, 0xaa, 0x07, 0xb2, 0x03, 0x00, 0x00, +} + +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) 
+ `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustBundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto new file mode 100644 index 000000000..b0ebc4bd4 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.certificates.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/certificates/v1alpha1"; + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +message ClusterTrustBundle { + // metadata contains the object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the signer (if any) and trust anchors. + optional ClusterTrustBundleSpec spec = 2; +} + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +message ClusterTrustBundleList { + // metadata contains the list metadata. + // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a collection of ClusterTrustBundle objects + repeated ClusterTrustBundle items = 2; +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +message ClusterTrustBundleSpec { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + optional string signerName = 1; + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. 
+ // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. + // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + optional string trustBundle = 2; +} + diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go new file mode 100644 index 000000000..7288ed9a3 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterTrustBundle{}, + &ClusterTrustBundleList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go new file mode 100644 index 000000000..1a9fda011 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +type ClusterTrustBundle struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the signer (if any) and trust anchors. + Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +type ClusterTrustBundleSpec struct { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"` + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. 
+ // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +type ClusterTrustBundleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of ClusterTrustBundle objects + Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..bff649e3c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterTrustBundle = map[string]string{ + "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. 
Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the signer (if any) and trust anchors.", +} + +func (ClusterTrustBundle) SwaggerDoc() map[string]string { + return map_ClusterTrustBundle +} + +var map_ClusterTrustBundleList = map[string]string{ + "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of ClusterTrustBundle objects", +} + +func (ClusterTrustBundleList) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleList +} + +var map_ClusterTrustBundleSpec = map[string]string{ + "": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", +} + +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..30a4dc1e8 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
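[Editorial note, not part of the vendored patch] The swagger docs above summarize the ClusterTrustBundle API that this bump vendors in. As a minimal, hedged sketch of how a consumer of these vendored packages might use them, the snippet below registers the v1alpha1 certificates types into a runtime scheme via the AddToScheme helper defined in register.go and constructs a bundle for a signer, following the documented rule that the object name carries the signer name as a prefix with slashes translated to colons. The object name suffix and the PEM placeholder are illustrative assumptions only.

package main

import (
	"fmt"

	certv1alpha1 "k8s.io/api/certificates/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the vendored v1alpha1 certificates types into a scheme,
	// as a consumer of this package would typically do.
	scheme := runtime.NewScheme()
	if err := certv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Construct a bundle for a hypothetical signer. Per the API comments,
	// when signerName is set the object name must use the signer name as a
	// prefix, with slashes translated to colons ("ca-roots" is illustrative).
	bundle := &certv1alpha1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{Name: "example.com:foo:ca-roots"},
		Spec: certv1alpha1.ClusterTrustBundleSpec{
			SignerName:  "example.com/foo",
			TrustBundle: "-----BEGIN CERTIFICATE-----\n...placeholder PEM...\n-----END CERTIFICATE-----\n",
		},
	}
	fmt.Println(bundle.Name, bundle.Spec.SignerName)
}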
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..dfafa656c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index e246fba02..f70f01ef7 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -124,8 +124,10 @@ message CertificateSigningRequestSpec { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index 877312d97..7e5a5c198 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct { // allowedUsages specifies a set of usage contexts the key will be // valid for. 
- // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -229,8 +231,10 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// See: +// +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string const ( diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index d3f318150..f9ab1f13d 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index b1efb737f..36fce60f2 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ace..b0e1d0682 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -30,7 +30,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -43,7 +43,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -69,6 +69,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f1440430..f3720eca0 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 85faa3b09..92c8918b8 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca8..3a3d5f32e 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -33,7 +33,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -46,7 +46,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -75,6 +75,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265d..78ca4e393 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1d..106ba14c3 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 @@ -56,9 +56,9 @@ const ( // AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaDefaultProfileAnnotatoinKey is the annotation key specifying the default AppArmor profile. + // AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile. AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" - // AppArmorBetaAllowedProfileAnnotationKey is the annotation key specifying the allowed AppArmor profiles. + // AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles. 
AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" // AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. @@ -78,7 +78,7 @@ const ( // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" @@ -144,8 +144,19 @@ const ( // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any - // other value is treated as "Disabled". - AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable + // Topology Aware Hints for a Service. This may be set to "Auto" or + // "Disabled". Any other value is treated as "Disabled". This annotation has + // been deprecated in favor of the "service.kubernetes.io/topology-mode" + // annotation. + DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + + // AnnotationTopologyMode can be used to enable or disable Topology Aware + // Routing for a Service. Well known values are "Auto" and "Disabled". + // Implementations may choose to develop new topology approaches, exposing + // them with domain-prefixed values. For example, "example.com/lowest-rtt" + // could be a valid implementation-specific value for this annotation. These + // heuristics will often populate topology hints on EndpointSlices, but that + // is not a requirement. 
+ AnnotationTopologyMode = "service.kubernetes.io/topology-mode" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 3f8aaf100..c267a5feb 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -469,10 +469,38 @@ func (m *CinderVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo +func (m *ClaimSource) Reset() { *m = ClaimSource{} } +func (*ClaimSource) ProtoMessage() {} +func (*ClaimSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClaimSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClaimSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClaimSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClaimSource.Merge(m, src) +} +func (m *ClaimSource) XXX_Size() int { + return m.Size() +} +func (m *ClaimSource) XXX_DiscardUnknown() { + xxx_messageInfo_ClaimSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClaimSource proto.InternalMessageInfo + func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{15} + return fileDescriptor_83c10c24ec417dc9, []int{16} } func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{16} + return fileDescriptor_83c10c24ec417dc9, []int{17} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} + return fileDescriptor_83c10c24ec417dc9, []int{18} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_83c10c24ec417dc9, []int{19} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_83c10c24ec417dc9, []int{20} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{20} + return fileDescriptor_83c10c24ec417dc9, []int{21} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_83c10c24ec417dc9, []int{22} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_83c10c24ec417dc9, []int{23} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_83c10c24ec417dc9, []int{24} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_83c10c24ec417dc9, []int{25} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +780,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_83c10c24ec417dc9, []int{26} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +808,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_83c10c24ec417dc9, []int{27} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +836,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_83c10c24ec417dc9, []int{28} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +864,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return 
fileDescriptor_83c10c24ec417dc9, []int{29} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -861,10 +889,38 @@ func (m *ContainerPort) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } +func (*ContainerResizePolicy) ProtoMessage() {} +func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResizePolicy.Merge(m, src) +} +func (m *ContainerResizePolicy) XXX_Size() int { + return m.Size() +} +func (m *ContainerResizePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +948,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +976,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +1004,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1032,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1060,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1088,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1116,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1200,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1228,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1256,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1284,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { 
- return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1312,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1340,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1368,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1396,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1424,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1452,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1508,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *Event) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1480,7 +1536,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1564,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1592,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1620,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1648,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1676,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1704,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1732,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1760,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = 
GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1788,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1816,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1844,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1872,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1900,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1928,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1956,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1925,10 +1981,38 @@ func (m *HostAlias) XXX_DiscardUnknown() { var xxx_messageInfo_HostAlias proto.InternalMessageInfo +func (m 
*HostIP) Reset() { *m = HostIP{} } +func (*HostIP) ProtoMessage() {} +func (*HostIP) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{69} +} +func (m *HostIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostIP.Merge(m, src) +} +func (m *HostIP) XXX_Size() int { + return m.Size() +} +func (m *HostIP) XXX_DiscardUnknown() { + xxx_messageInfo_HostIP.DiscardUnknown(m) +} + +var xxx_messageInfo_HostIP proto.InternalMessageInfo + func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +2040,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2068,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2096,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2124,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2152,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2180,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return 
fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2208,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2236,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2264,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2292,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2320,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2376,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2404,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 
+2432,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2460,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{86} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2488,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2516,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2544,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2572,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2600,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2628,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2656,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{90} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2684,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2712,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2740,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2768,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2796,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2824,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2852,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2880,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2908,7 @@ var 
xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2964,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2992,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +3020,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3048,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3076,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3104,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3132,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = 
PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func 
(*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3468,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3496,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3524,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3552,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = 
PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3580,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3608,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3636,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3692,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3720,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3748,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3776,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodLogOptions) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3804,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3832,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3860,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3888,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3829,10 +3913,94 @@ func (m *PodReadinessGate) XXX_DiscardUnknown() { var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo +func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } +func (*PodResourceClaim) ProtoMessage() {} +func (*PodResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodResourceClaim.Merge(m, src) +} +func (m *PodResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *PodResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PodResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo + +func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } +func (*PodResourceClaimStatus) ProtoMessage() {} +func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodResourceClaimStatus.Merge(m, src) +} +func (m *PodResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PodResourceClaimStatus) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo + +func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } +func (*PodSchedulingGate) ProtoMessage() {} +func (*PodSchedulingGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingGate.Merge(m, src) +} +func (m *PodSchedulingGate) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingGate.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo + func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +4028,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +4056,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +4084,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4112,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4140,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4168,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = 
PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4196,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4224,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4252,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4280,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4308,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4336,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4364,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4392,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - 
return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4420,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4448,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4476,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4504,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4532,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4560,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4588,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m 
*ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4672,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4529,10 +4697,38 @@ func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4756,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4784,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceQuotaList) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4840,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4868,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4896,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4924,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4952,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4980,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +5008,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +5036,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +5064,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5092,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5120,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5148,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5176,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5204,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5232,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SecretVolumeSource) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5260,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5288,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5316,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5344,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5372,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5400,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5428,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5456,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5484,7 @@ var 
xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5512,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5540,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5568,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5596,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5624,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5652,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5680,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5708,7 @@ var 
xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5736,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5764,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5792,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5820,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5848,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5677,10 +5873,38 @@ func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo +func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } +func (*TypedObjectReference) ProtoMessage() {} +func (*TypedObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{208} +} +func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypedObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedObjectReference.Merge(m, src) +} +func (m *TypedObjectReference) XXX_Size() 
int { + return m.Size() +} +func (m *TypedObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo + func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5932,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5960,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5988,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +6016,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +6044,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +6072,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6100,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{216} } func (m 
*WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6128,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{217} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5947,6 +6171,7 @@ func init() { proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") @@ -5963,11 +6188,13 @@ func init() { proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6003,6 +6230,7 @@ func init() { proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostIP)(nil), "k8s.io.api.core.v1.HostIP") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") @@ -6056,6 +6284,7 @@ func init() { proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((map[ResourceName]ClaimResourceStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourceStatusesEntry") 
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate") @@ -6082,6 +6311,9 @@ func init() { proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodResourceClaim)(nil), "k8s.io.api.core.v1.PodResourceClaim") + proto.RegisterType((*PodResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodResourceClaimStatus") + proto.RegisterType((*PodSchedulingGate)(nil), "k8s.io.api.core.v1.PodSchedulingGate") proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") @@ -6110,6 +6342,7 @@ func init() { proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim") proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") @@ -6159,6 +6392,7 @@ func init() { proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*TypedObjectReference)(nil), "k8s.io.api.core.v1.TypedObjectReference") proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") @@ -6175,899 +6409,934 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x69, 0x70, 0x24, 0xd7, - 0x79, 0x98, 0x7a, 0x06, 0xd7, 0x7c, 0xb8, 0xdf, 0x1e, 0xc4, 0x82, 0xdc, 0xc5, 0xb2, 0x29, 0x2d, - 0x97, 0x22, 0x89, 0xd5, 0xf2, 0x90, 0x68, 0x52, 0xa2, 0x05, 0x60, 0x80, 0x5d, 0x70, 0x17, 0xd8, - 0xe1, 0x1b, 0xec, 0xae, 0x24, 0x53, 0x2a, 0x35, 0x66, 0x1e, 0x80, 0x16, 0x66, 0xba, 0x87, 0xdd, - 0x3d, 0xd8, 0x05, 0x23, 0x57, 0x1c, 0xf9, 0x94, 0x8f, 0x94, 0x2a, 0xe5, 0x1c, 0x25, 0xbb, 0x5c, - 0x29, 0xc7, 0x89, 0xad, 0x28, 0x97, 0x23, 0xc7, 0x76, 0x2c, 0x27, 0x76, 0x6e, 0x27, 0x3f, 0x6c, - 0xc7, 0x55, 0xb1, 0x5c, 0xe5, 0x0a, 0x62, 0xaf, 0x53, 0xe5, 0x52, 0x55, 0x62, 0x3b, 0x71, 0xf2, - 0x23, 0x1b, 0x57, 0x9c, 0x7a, 0x67, 0xbf, 0xd7, 0xd7, 0x0c, 0x96, 0x58, 0x88, 0x52, 0xf1, 0xdf, - 0xcc, 0xfb, 0xbe, 0xf7, 0xbd, 0xd7, 0xef, 0xfc, 0xde, 0x77, 0xc2, 0x2b, 0xbb, 0x2f, 0x85, 0xf3, - 0xae, 0x7f, 0x69, 0xb7, 0xbb, 0x49, 0x02, 0x8f, 0x44, 0x24, 0xbc, 
0xb4, 0x47, 0xbc, 0xa6, 0x1f, - 0x5c, 0x12, 0x00, 0xa7, 0xe3, 0x5e, 0x6a, 0xf8, 0x01, 0xb9, 0xb4, 0x77, 0xf9, 0xd2, 0x36, 0xf1, - 0x48, 0xe0, 0x44, 0xa4, 0x39, 0xdf, 0x09, 0xfc, 0xc8, 0x47, 0x88, 0xe3, 0xcc, 0x3b, 0x1d, 0x77, - 0x9e, 0xe2, 0xcc, 0xef, 0x5d, 0x9e, 0x7d, 0x76, 0xdb, 0x8d, 0x76, 0xba, 0x9b, 0xf3, 0x0d, 0xbf, - 0x7d, 0x69, 0xdb, 0xdf, 0xf6, 0x2f, 0x31, 0xd4, 0xcd, 0xee, 0x16, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, - 0x9c, 0xc4, 0xec, 0x0b, 0x71, 0x33, 0x6d, 0xa7, 0xb1, 0xe3, 0x7a, 0x24, 0xd8, 0xbf, 0xd4, 0xd9, - 0xdd, 0x66, 0xed, 0x06, 0x24, 0xf4, 0xbb, 0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, 0x5e, 0x6a, - 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0xa5, 0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, 0xba, 0x99, - 0x0f, 0xf6, 0xaa, 0x10, 0x36, 0x76, 0x48, 0xdb, 0x49, 0xd5, 0x7b, 0x3e, 0xaf, 0x5e, 0x37, 0x72, - 0x5b, 0x97, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x9a, 0x05, 0xe7, 0x17, 0x6e, 0xd7, - 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, 0xad, 0x47, 0x7e, 0x40, 0x6e, 0xf9, - 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0, 0x67, 0x60, 0x64, 0x8f, 0xfd, 0x5f, 0xad, 0xce, 0x58, - 0xe7, 0xad, 0x8b, 0x95, 0xc5, 0xa9, 0x5f, 0x3b, 0x98, 0x7b, 0xcf, 0xbd, 0x83, 0xb9, 0x91, 0x5b, - 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x01, 0x86, 0xb6, 0xc2, 0x8d, 0xfd, 0x0e, 0x99, 0x29, 0x31, 0xdc, - 0x09, 0x81, 0x3b, 0xb4, 0x52, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x25, 0xa8, 0x74, 0x9c, 0x20, 0x72, - 0x23, 0xd7, 0xf7, 0x66, 0xca, 0xe7, 0xad, 0x8b, 0x83, 0x8b, 0xd3, 0x02, 0xb5, 0x52, 0x93, 0x00, - 0x1c, 0xe3, 0xd0, 0x6e, 0x04, 0xc4, 0x69, 0xde, 0xf0, 0x5a, 0xfb, 0x33, 0x03, 0xe7, 0xad, 0x8b, - 0x23, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x62, 0x09, 0x46, 0x16, 0xb6, 0xb6, 0x5c, - 0xcf, 0x8d, 0xf6, 0xd1, 0x2d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, 0xd9, 0x57, 0x8c, 0x3e, 0x77, - 0x7e, 0x3e, 0xbd, 0x94, 0xe6, 0xd7, 0x35, 0xbc, 0xc5, 0xa9, 0x7b, 0x07, 0x73, 0x63, 0x7a, 0x09, - 0x36, 0xe8, 0x20, 0x0c, 0xa3, 0x1d, 0xbf, 0xa9, 0xc8, 0x96, 0x18, 0xd9, 0xb9, 0x2c, 0xb2, 0xb5, - 0x18, 0x6d, 0x71, 0xf2, 0xde, 0xc1, 0xdc, 0xa8, 0x56, 0x80, 0x75, 0x22, 0x68, 0x13, 0x26, 0xe9, - 0x5f, 0x2f, 0x72, 0x15, 0xdd, 0x32, 0xa3, 0xfb, 0x44, 0x1e, 0x5d, 0x0d, 0x75, 0xf1, 0xc4, 0xbd, - 0x83, 0xb9, 0xc9, 0x44, 0x21, 0x4e, 0x12, 0xb4, 0xdf, 0x82, 0x89, 0x85, 0x28, 0x72, 0x1a, 0x3b, - 0xa4, 0xc9, 0x67, 0x10, 0xbd, 0x00, 0x03, 0x9e, 0xd3, 0x26, 0x62, 0x7e, 0xcf, 0x8b, 0x81, 0x1d, - 0x58, 0x77, 0xda, 0xe4, 0xfe, 0xc1, 0xdc, 0xd4, 0x4d, 0xcf, 0x7d, 0xb3, 0x2b, 0x56, 0x05, 0x2d, - 0xc3, 0x0c, 0x1b, 0x3d, 0x07, 0xd0, 0x24, 0x7b, 0x6e, 0x83, 0xd4, 0x9c, 0x68, 0x47, 0xcc, 0x37, - 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0xef, 0x42, 0x65, 0x61, 0xcf, 0x77, 0x9b, 0x35, - 0xbf, 0x19, 0xa2, 0x5d, 0x98, 0xec, 0x04, 0x64, 0x8b, 0x04, 0xaa, 0x68, 0xc6, 0x3a, 0x5f, 0xbe, - 0x38, 0xfa, 0xdc, 0xc5, 0xcc, 0x8f, 0x35, 0x51, 0x97, 0xbd, 0x28, 0xd8, 0x5f, 0x7c, 0x44, 0xb4, - 0x37, 0x99, 0x80, 0xe2, 0x24, 0x65, 0xfb, 0x5f, 0x97, 0xe0, 0xd4, 0xc2, 0x5b, 0xdd, 0x80, 0x54, - 0xdd, 0x70, 0x37, 0xb9, 0xc2, 0x9b, 0x6e, 0xb8, 0xbb, 0x1e, 0x8f, 0x80, 0x5a, 0x5a, 0x55, 0x51, - 0x8e, 0x15, 0x06, 0x7a, 0x16, 0x86, 0xe9, 0xef, 0x9b, 0x78, 0x55, 0x7c, 0xf2, 0x09, 0x81, 0x3c, - 0x5a, 0x75, 0x22, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x35, 0x18, 0x6d, 0xb0, 0x0d, 0xb9, 0xbd, - 0xe6, 0x37, 0x09, 0x9b, 0xcc, 0xca, 0xe2, 0xd3, 0x14, 0x7d, 0x29, 0x2e, 0xbe, 0x7f, 0x30, 0x37, - 0xc3, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0x5f, 0x03, 0x8c, 0x12, 0x64, - 0xec, 0xad, 0x8b, 0xda, 0x56, 0x19, 0x64, 0x5b, 0x65, 0x2c, 0x7b, 0x9b, 0xa0, 0xcb, 0x30, 
0xb0, - 0xeb, 0x7a, 0xcd, 0x99, 0x21, 0x46, 0xeb, 0x2c, 0x9d, 0xf3, 0x6b, 0xae, 0xd7, 0xbc, 0x7f, 0x30, - 0x37, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0xa9, 0x05, 0x73, 0x0c, 0xb6, 0xe2, 0xb6, - 0x48, 0x8d, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x0e, 0x20, 0x24, 0x8d, 0x80, - 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x57, 0x10, 0xac, 0x61, 0xd1, 0x03, 0x21, 0xdc, 0x71, 0x02, - 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x2e, 0x01, 0x38, 0xc6, 0x31, 0x0e, 0x84, 0x72, 0xaf, - 0x03, 0x01, 0x7d, 0x04, 0x26, 0xe3, 0xc6, 0xc2, 0x8e, 0xd3, 0x90, 0x03, 0xc8, 0xb6, 0x4c, 0xdd, - 0x04, 0xe1, 0x24, 0xae, 0xfd, 0x77, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xc3, 0xbf, 0xd5, 0xfe, - 0x45, 0x0b, 0x86, 0x17, 0x5d, 0xaf, 0xe9, 0x7a, 0xdb, 0xe8, 0xd3, 0x30, 0x42, 0xef, 0xa6, 0xa6, - 0x13, 0x39, 0xe2, 0xdc, 0xfb, 0x80, 0xb6, 0xb7, 0xd4, 0x55, 0x31, 0xdf, 0xd9, 0xdd, 0xa6, 0x05, - 0xe1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xc6, 0xe6, 0x67, 0x48, 0x23, 0x5a, 0x23, 0x91, 0x13, 0x7f, - 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0x86, 0x22, 0x27, 0xd8, 0x26, 0x91, 0x38, 0x00, 0x33, - 0x0f, 0x2a, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x06, 0x89, 0xaf, 0x85, 0x0d, 0x56, 0x15, 0x0b, - 0x12, 0xf6, 0x8f, 0x0c, 0xc3, 0x99, 0xa5, 0xfa, 0x6a, 0xce, 0xba, 0xba, 0x00, 0x43, 0xcd, 0xc0, - 0xdd, 0x23, 0x81, 0x18, 0x67, 0x45, 0xa5, 0xca, 0x4a, 0xb1, 0x80, 0xa2, 0x97, 0x60, 0x8c, 0x5f, - 0x48, 0x57, 0x1d, 0xaf, 0xd9, 0x92, 0x43, 0x7c, 0x52, 0x60, 0x8f, 0xdd, 0xd2, 0x60, 0xd8, 0xc0, - 0x3c, 0xe4, 0xa2, 0xba, 0x90, 0xd8, 0x8c, 0x79, 0x97, 0xdd, 0xe7, 0x2d, 0x98, 0xe2, 0xcd, 0x2c, - 0x44, 0x51, 0xe0, 0x6e, 0x76, 0x23, 0x12, 0xce, 0x0c, 0xb2, 0x93, 0x6e, 0x29, 0x6b, 0xb4, 0x72, - 0x47, 0x60, 0xfe, 0x56, 0x82, 0x0a, 0x3f, 0x04, 0x67, 0x44, 0xbb, 0x53, 0x49, 0x30, 0x4e, 0x35, - 0x8b, 0xbe, 0xdb, 0x82, 0xd9, 0x86, 0xef, 0x45, 0x81, 0xdf, 0x6a, 0x91, 0xa0, 0xd6, 0xdd, 0x6c, - 0xb9, 0xe1, 0x0e, 0x5f, 0xa7, 0x98, 0x6c, 0xb1, 0x93, 0x20, 0x67, 0x0e, 0x15, 0x92, 0x98, 0xc3, - 0x73, 0xf7, 0x0e, 0xe6, 0x66, 0x97, 0x72, 0x49, 0xe1, 0x82, 0x66, 0xd0, 0x2e, 0x20, 0x7a, 0x95, - 0xd6, 0x23, 0x67, 0x9b, 0xc4, 0x8d, 0x0f, 0xf7, 0xdf, 0xf8, 0xe9, 0x7b, 0x07, 0x73, 0x68, 0x3d, - 0x45, 0x02, 0x67, 0x90, 0x45, 0x6f, 0xc2, 0x49, 0x5a, 0x9a, 0xfa, 0xd6, 0x91, 0xfe, 0x9b, 0x9b, - 0xb9, 0x77, 0x30, 0x77, 0x72, 0x3d, 0x83, 0x08, 0xce, 0x24, 0x8d, 0xbe, 0xcb, 0x82, 0x33, 0xf1, - 0xe7, 0x2f, 0xdf, 0xed, 0x38, 0x5e, 0x33, 0x6e, 0xb8, 0xd2, 0x7f, 0xc3, 0xf4, 0x4c, 0x3e, 0xb3, - 0x94, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xbb, 0x04, 0xa7, 0x32, 0x57, 0x0b, 0x9a, 0x82, 0xf2, 0x2e, - 0xe1, 0x5c, 0x50, 0x05, 0xd3, 0x9f, 0xe8, 0x24, 0x0c, 0xee, 0x39, 0xad, 0xae, 0xd8, 0x28, 0x98, - 0xff, 0x79, 0xb9, 0xf4, 0x92, 0x65, 0xff, 0x9b, 0x32, 0x4c, 0x2e, 0xd5, 0x57, 0x1f, 0x68, 0x17, - 0xea, 0xd7, 0x50, 0xa9, 0xf0, 0x1a, 0x8a, 0x2f, 0xb5, 0x72, 0xee, 0xa5, 0xf6, 0x17, 0x33, 0xb6, - 0xd0, 0x00, 0xdb, 0x42, 0xdf, 0x96, 0xb3, 0x85, 0x8e, 0x78, 0xe3, 0xec, 0xe5, 0xac, 0xa2, 0x41, - 0x36, 0x99, 0x99, 0x1c, 0xcb, 0x75, 0xbf, 0xe1, 0xb4, 0x92, 0x47, 0xdf, 0x21, 0x97, 0xd2, 0xd1, - 0xcc, 0x63, 0x03, 0xc6, 0x96, 0x9c, 0x8e, 0xb3, 0xe9, 0xb6, 0xdc, 0xc8, 0x25, 0x21, 0x7a, 0x12, - 0xca, 0x4e, 0xb3, 0xc9, 0xb8, 0xad, 0xca, 0xe2, 0xa9, 0x7b, 0x07, 0x73, 0xe5, 0x85, 0x26, 0xbd, - 0xf6, 0x41, 0x61, 0xed, 0x63, 0x8a, 0x81, 0xde, 0x0f, 0x03, 0xcd, 0xc0, 0xef, 0xcc, 0x94, 0x18, - 0x26, 0xdd, 0x75, 0x03, 0xd5, 0xc0, 0xef, 0x24, 0x50, 0x19, 0x8e, 0xfd, 0xab, 0x25, 0x78, 0x6c, - 0x89, 0x74, 0x76, 0x56, 0xea, 0x39, 0xe7, 0xf7, 0x45, 0x18, 0x69, 0xfb, 0x9e, 0x1b, 0xf9, 0x41, - 0x28, 0x9a, 0x66, 
0x2b, 0x62, 0x4d, 0x94, 0x61, 0x05, 0x45, 0xe7, 0x61, 0xa0, 0x13, 0x33, 0x95, - 0x63, 0x92, 0x21, 0x65, 0xec, 0x24, 0x83, 0x50, 0x8c, 0x6e, 0x48, 0x02, 0xb1, 0x62, 0x14, 0xc6, - 0xcd, 0x90, 0x04, 0x98, 0x41, 0xe2, 0x9b, 0x99, 0xde, 0xd9, 0xe2, 0x84, 0x4e, 0xdc, 0xcc, 0x14, - 0x82, 0x35, 0x2c, 0x54, 0x83, 0x4a, 0x98, 0x98, 0xd9, 0xbe, 0xb6, 0xe9, 0x38, 0xbb, 0xba, 0xd5, - 0x4c, 0xc6, 0x44, 0x8c, 0x1b, 0x65, 0xa8, 0xe7, 0xd5, 0xfd, 0xd5, 0x12, 0x20, 0x3e, 0x84, 0xdf, - 0x64, 0x03, 0x77, 0x33, 0x3d, 0x70, 0xfd, 0x6f, 0x89, 0xa3, 0x1a, 0xbd, 0xff, 0x65, 0xc1, 0x63, - 0x4b, 0xae, 0xd7, 0x24, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x6f, 0xd9, 0xc3, 0x31, 0x0d, 0xc6, 0x12, - 0x1b, 0x38, 0x82, 0x25, 0x66, 0xff, 0xb1, 0x05, 0x88, 0x7f, 0xf6, 0x3b, 0xee, 0x63, 0x6f, 0xa6, - 0x3f, 0xf6, 0x08, 0x96, 0x85, 0x7d, 0x1d, 0x26, 0x96, 0x5a, 0x2e, 0xf1, 0xa2, 0xd5, 0xda, 0x92, - 0xef, 0x6d, 0xb9, 0xdb, 0xe8, 0x65, 0x98, 0x88, 0xdc, 0x36, 0xf1, 0xbb, 0x51, 0x9d, 0x34, 0x7c, - 0x8f, 0xbd, 0x24, 0xad, 0x8b, 0x83, 0x8b, 0xe8, 0xde, 0xc1, 0xdc, 0xc4, 0x86, 0x01, 0xc1, 0x09, - 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xed, 0x8e, 0xef, 0x11, 0x2f, 0x5a, 0xf2, 0xbd, 0x26, 0x97, - 0x38, 0xbc, 0x0c, 0x03, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x17, 0xe4, 0x46, 0xa1, 0xa3, 0x70, 0xff, - 0x60, 0xee, 0x74, 0xba, 0x06, 0x1b, 0x27, 0x56, 0x07, 0x7d, 0x1b, 0x0c, 0x85, 0x91, 0x13, 0x75, - 0x43, 0x31, 0x9a, 0x8f, 0xcb, 0xd1, 0xac, 0xb3, 0xd2, 0xfb, 0x07, 0x73, 0x93, 0xaa, 0x1a, 0x2f, - 0xc2, 0xa2, 0x02, 0x7a, 0x0a, 0x86, 0xdb, 0x24, 0x0c, 0x9d, 0x6d, 0x79, 0x1b, 0x4e, 0x8a, 0xba, - 0xc3, 0x6b, 0xbc, 0x18, 0x4b, 0x38, 0x7a, 0x02, 0x06, 0x49, 0x10, 0xf8, 0x81, 0xd8, 0xa3, 0xe3, - 0x02, 0x71, 0x70, 0x99, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xc3, 0x82, 0x49, 0xd5, 0x57, 0xde, 0xd6, - 0x31, 0xbc, 0x0a, 0x3e, 0x01, 0xd0, 0x90, 0x1f, 0x18, 0xb2, 0xdb, 0x63, 0xf4, 0xb9, 0x0b, 0x99, - 0x17, 0x75, 0x6a, 0x18, 0x63, 0xca, 0xaa, 0x28, 0xc4, 0x1a, 0x35, 0xfb, 0x9f, 0x59, 0x70, 0x22, - 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, 0xbe, 0x6a, 0xbe, 0xbf, 0xaf, 0xa2, 0xb5, 0xd9, - 0x37, 0xa9, 0xa5, 0x2c, 0x4b, 0xb4, 0x2f, 0xba, 0x0a, 0x83, 0x6e, 0x44, 0xda, 0xf2, 0x63, 0x9e, - 0x28, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0x56, 0x69, 0x4d, 0xcc, 0x09, 0xd8, 0xbf, 0x5a, 0x86, - 0x0a, 0x5f, 0xb6, 0x6b, 0x4e, 0xe7, 0x18, 0xe6, 0xe2, 0x69, 0xa8, 0xb8, 0xed, 0x76, 0x37, 0x72, - 0x36, 0xc5, 0x71, 0x3e, 0xc2, 0xb7, 0xd6, 0xaa, 0x2c, 0xc4, 0x31, 0x1c, 0xad, 0xc2, 0x00, 0xeb, - 0x0a, 0xff, 0xca, 0x27, 0xb3, 0xbf, 0x52, 0xf4, 0x7d, 0xbe, 0xea, 0x44, 0x0e, 0xe7, 0xa4, 0xd4, - 0x3d, 0x42, 0x8b, 0x30, 0x23, 0x81, 0x1c, 0x80, 0x4d, 0xd7, 0x73, 0x82, 0x7d, 0x5a, 0x36, 0x53, - 0x66, 0x04, 0x9f, 0x2d, 0x26, 0xb8, 0xa8, 0xf0, 0x39, 0x59, 0xf5, 0x61, 0x31, 0x00, 0x6b, 0x44, - 0x67, 0x3f, 0x04, 0x15, 0x85, 0x7c, 0x18, 0x86, 0x68, 0xf6, 0x23, 0x30, 0x99, 0x68, 0xab, 0x57, - 0xf5, 0x31, 0x9d, 0x9f, 0xfa, 0x25, 0x76, 0x64, 0x88, 0x5e, 0x2f, 0x7b, 0x7b, 0xe2, 0xc8, 0x7d, - 0x0b, 0x4e, 0xb6, 0x32, 0x4e, 0x32, 0x31, 0xaf, 0xfd, 0x9f, 0x7c, 0x8f, 0x89, 0xcf, 0x3e, 0x99, - 0x05, 0xc5, 0x99, 0x6d, 0x50, 0x1e, 0xc1, 0xef, 0xd0, 0x0d, 0xe2, 0xb4, 0x74, 0x76, 0xfb, 0x86, - 0x28, 0xc3, 0x0a, 0x4a, 0xcf, 0xbb, 0x93, 0xaa, 0xf3, 0xd7, 0xc8, 0x7e, 0x9d, 0xb4, 0x48, 0x23, - 0xf2, 0x83, 0x6f, 0x68, 0xf7, 0xcf, 0xf2, 0xd1, 0xe7, 0xc7, 0xe5, 0xa8, 0x20, 0x50, 0xbe, 0x46, - 0xf6, 0xf9, 0x54, 0xe8, 0x5f, 0x57, 0x2e, 0xfc, 0xba, 0x9f, 0xb5, 0x60, 0x5c, 0x7d, 0xdd, 0x31, - 0x9c, 0x0b, 0x8b, 0xe6, 0xb9, 0x70, 0xb6, 0x70, 0x81, 0xe7, 0x9c, 0x08, 0x5f, 0x2d, 0xc1, 0x19, - 0x85, 0x43, 0xdf, 0x06, 0xfc, 0x8f, 0x58, 
0x55, 0x97, 0xa0, 0xe2, 0x29, 0xa9, 0x95, 0x65, 0x8a, - 0x8b, 0x62, 0x99, 0x55, 0x8c, 0x43, 0x59, 0x3c, 0x2f, 0x16, 0x2d, 0x8d, 0xe9, 0xe2, 0x5c, 0x21, - 0xba, 0x5d, 0x84, 0x72, 0xd7, 0x6d, 0x8a, 0x0b, 0xe6, 0x03, 0x72, 0xb4, 0x6f, 0xae, 0x56, 0xef, - 0x1f, 0xcc, 0x3d, 0x9e, 0xa7, 0x4a, 0xa0, 0x37, 0x5b, 0x38, 0x7f, 0x73, 0xb5, 0x8a, 0x69, 0x65, - 0xb4, 0x00, 0x93, 0x52, 0x5b, 0x72, 0x8b, 0xb2, 0x5b, 0xbe, 0x27, 0xee, 0x21, 0x25, 0x93, 0xc5, - 0x26, 0x18, 0x27, 0xf1, 0x51, 0x15, 0xa6, 0x76, 0xbb, 0x9b, 0xa4, 0x45, 0x22, 0xfe, 0xc1, 0xd7, - 0x08, 0x97, 0x58, 0x56, 0xe2, 0x97, 0xd9, 0xb5, 0x04, 0x1c, 0xa7, 0x6a, 0xd8, 0x7f, 0xce, 0xee, - 0x03, 0x31, 0x7a, 0xb5, 0xc0, 0xa7, 0x0b, 0x8b, 0x52, 0xff, 0x46, 0x2e, 0xe7, 0x7e, 0x56, 0xc5, - 0x35, 0xb2, 0xbf, 0xe1, 0x53, 0xce, 0x3c, 0x7b, 0x55, 0x18, 0x6b, 0x7e, 0xa0, 0x70, 0xcd, 0xff, - 0x5c, 0x09, 0x4e, 0xa9, 0x11, 0x30, 0x98, 0xc0, 0x6f, 0xf6, 0x31, 0xb8, 0x0c, 0xa3, 0x4d, 0xb2, - 0xe5, 0x74, 0x5b, 0x91, 0x12, 0x9f, 0x0f, 0x72, 0x15, 0x4a, 0x35, 0x2e, 0xc6, 0x3a, 0xce, 0x21, - 0x86, 0xed, 0x7f, 0x8f, 0xb2, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca, 0xdd, 0x35, - 0x4f, 0xc0, 0xa0, 0xdb, 0xa6, 0x8c, 0x59, 0xc9, 0xe4, 0xb7, 0x56, 0x69, 0x21, 0xe6, 0x30, 0xf4, - 0x3e, 0x18, 0x6e, 0xf8, 0xed, 0xb6, 0xe3, 0x35, 0xd9, 0x95, 0x57, 0x59, 0x1c, 0xa5, 0xbc, 0xdb, - 0x12, 0x2f, 0xc2, 0x12, 0x86, 0x1e, 0x83, 0x01, 0x27, 0xd8, 0xe6, 0x32, 0x8c, 0xca, 0xe2, 0x08, - 0x6d, 0x69, 0x21, 0xd8, 0x0e, 0x31, 0x2b, 0xa5, 0x4f, 0xb0, 0x3b, 0x7e, 0xb0, 0xeb, 0x7a, 0xdb, - 0x55, 0x37, 0x10, 0x5b, 0x42, 0xdd, 0x85, 0xb7, 0x15, 0x04, 0x6b, 0x58, 0x68, 0x05, 0x06, 0x3b, - 0x7e, 0x10, 0x85, 0x33, 0x43, 0x6c, 0xb8, 0x1f, 0xcf, 0x39, 0x88, 0xf8, 0xd7, 0xd6, 0xfc, 0x20, - 0x8a, 0x3f, 0x80, 0xfe, 0x0b, 0x31, 0xaf, 0x8e, 0xae, 0xc3, 0x30, 0xf1, 0xf6, 0x56, 0x02, 0xbf, - 0x3d, 0x73, 0x22, 0x9f, 0xd2, 0x32, 0x47, 0xe1, 0xcb, 0x2c, 0xe6, 0x51, 0x45, 0x31, 0x96, 0x24, - 0xd0, 0xb7, 0x41, 0x99, 0x78, 0x7b, 0x33, 0xc3, 0x8c, 0xd2, 0x6c, 0x0e, 0xa5, 0x5b, 0x4e, 0x10, - 0x9f, 0xf9, 0xcb, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0xc7, 0xa1, 0x22, 0x0f, 0x8c, 0x50, 0x08, 0xeb, - 0x32, 0x17, 0xac, 0x3c, 0x66, 0x30, 0x79, 0xb3, 0xeb, 0x06, 0xa4, 0x4d, 0xbc, 0x28, 0x8c, 0x4f, - 0x48, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x5c, 0x4a, 0x88, 0xd7, 0xfc, 0xae, 0x17, 0x85, 0x33, - 0x15, 0xd6, 0xbd, 0x4c, 0xdd, 0xdd, 0xad, 0x18, 0x2f, 0x29, 0x42, 0xe6, 0x95, 0xb1, 0x41, 0x0a, - 0x7d, 0x12, 0xc6, 0xf9, 0x7f, 0xae, 0x01, 0x0b, 0x67, 0x4e, 0x31, 0xda, 0xe7, 0xf3, 0x69, 0x73, - 0xc4, 0xc5, 0x53, 0x82, 0xf8, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x1a, 0xc2, 0x30, 0xde, 0x72, 0xf7, - 0x88, 0x47, 0xc2, 0xb0, 0x16, 0xf8, 0x9b, 0x64, 0x06, 0xd8, 0xc0, 0x9c, 0xc9, 0xd6, 0x98, 0xf9, - 0x9b, 0x64, 0x71, 0x9a, 0xd2, 0xbc, 0xae, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x13, 0x26, 0xe8, 0x8b, - 0xcd, 0x8d, 0x89, 0x8e, 0xf6, 0x22, 0xca, 0xde, 0x55, 0xd8, 0xa8, 0x84, 0x13, 0x44, 0xd0, 0x0d, - 0x18, 0x0b, 0x23, 0x27, 0x88, 0xba, 0x1d, 0x4e, 0xf4, 0x74, 0x2f, 0xa2, 0x4c, 0xe1, 0x5a, 0xd7, - 0xaa, 0x60, 0x83, 0x00, 0x7a, 0x0d, 0x2a, 0x2d, 0x77, 0x8b, 0x34, 0xf6, 0x1b, 0x2d, 0x32, 0x33, - 0xc6, 0xa8, 0x65, 0x1e, 0x2a, 0xd7, 0x25, 0x12, 0xe7, 0x73, 0xd5, 0x5f, 0x1c, 0x57, 0x47, 0xb7, - 0xe0, 0x74, 0x44, 0x82, 0xb6, 0xeb, 0x39, 0xf4, 0x30, 0x10, 0x4f, 0x2b, 0xa6, 0xc8, 0x1c, 0x67, - 0xbb, 0xed, 0x9c, 0x98, 0x8d, 0xd3, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x0b, 0x33, 0x19, - 0x10, 0xbf, 0xe5, 0x36, 0xf6, 0x67, 0x4e, 0x32, 0xca, 0x1f, 0x16, 0x94, 0x67, 0x36, 0x72, 0xf0, - 0xee, 0x17, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x03, 0x26, 0xd9, 0x09, 
0x54, 0xeb, 0xb6, 0x5a, 0xa2, - 0xc1, 0x09, 0xd6, 0xe0, 0xfb, 0xe4, 0x7d, 0xbc, 0x6a, 0x82, 0xef, 0x1f, 0xcc, 0x41, 0xfc, 0x0f, - 0x27, 0x6b, 0xa3, 0x4d, 0xa6, 0x33, 0xeb, 0x06, 0x6e, 0xb4, 0x4f, 0xcf, 0x0d, 0x72, 0x37, 0x9a, - 0x99, 0x2c, 0x94, 0x57, 0xe8, 0xa8, 0x4a, 0xb1, 0xa6, 0x17, 0xe2, 0x24, 0x41, 0x7a, 0xa4, 0x86, - 0x51, 0xd3, 0xf5, 0x66, 0xa6, 0xf8, 0xbb, 0x44, 0x9e, 0x48, 0x75, 0x5a, 0x88, 0x39, 0x8c, 0xe9, - 0xcb, 0xe8, 0x8f, 0x1b, 0xf4, 0xe6, 0x9a, 0x66, 0x88, 0xb1, 0xbe, 0x4c, 0x02, 0x70, 0x8c, 0x43, - 0x99, 0xc9, 0x28, 0xda, 0x9f, 0x41, 0x0c, 0x55, 0x1d, 0x2c, 0x1b, 0x1b, 0x1f, 0xc7, 0xb4, 0xdc, - 0xde, 0x84, 0x09, 0x75, 0x10, 0xb2, 0x31, 0x41, 0x73, 0x30, 0xc8, 0xd8, 0x27, 0x21, 0x5d, 0xab, - 0xd0, 0x2e, 0x30, 0xd6, 0x0a, 0xf3, 0x72, 0xd6, 0x05, 0xf7, 0x2d, 0xb2, 0xb8, 0x1f, 0x11, 0xfe, - 0xa6, 0x2f, 0x6b, 0x5d, 0x90, 0x00, 0x1c, 0xe3, 0xd8, 0xff, 0x8f, 0xb3, 0xa1, 0xf1, 0x69, 0xdb, - 0xc7, 0xfd, 0xf2, 0x0c, 0x8c, 0xec, 0xf8, 0x61, 0x44, 0xb1, 0x59, 0x1b, 0x83, 0x31, 0xe3, 0x79, - 0x55, 0x94, 0x63, 0x85, 0x81, 0x5e, 0x81, 0xf1, 0x86, 0xde, 0x80, 0xb8, 0x1c, 0xd5, 0x31, 0x62, - 0xb4, 0x8e, 0x4d, 0x5c, 0xf4, 0x12, 0x8c, 0x30, 0x1b, 0x90, 0x86, 0xdf, 0x12, 0x5c, 0x9b, 0xbc, - 0xe1, 0x47, 0x6a, 0xa2, 0xfc, 0xbe, 0xf6, 0x1b, 0x2b, 0x6c, 0x74, 0x01, 0x86, 0x68, 0x17, 0x56, - 0x6b, 0xe2, 0x5a, 0x52, 0x82, 0xa2, 0xab, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0x57, 0x4a, 0xda, 0x28, - 0xd3, 0xf7, 0x30, 0x41, 0x35, 0x18, 0xbe, 0xe3, 0xb8, 0x91, 0xeb, 0x6d, 0x0b, 0xfe, 0xe3, 0xa9, - 0xc2, 0x3b, 0x8a, 0x55, 0xba, 0xcd, 0x2b, 0xf0, 0x5b, 0x54, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, - 0x74, 0x3d, 0x8f, 0x52, 0x2c, 0xf5, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, - 0xf4, 0x06, 0x80, 0xdc, 0x61, 0xa4, 0x29, 0x6c, 0x2f, 0x9e, 0xe9, 0x4d, 0x74, 0x43, 0xd5, 0x59, - 0x9c, 0xa0, 0x77, 0x74, 0xfc, 0x1f, 0x6b, 0xf4, 0xec, 0x88, 0xf1, 0x69, 0xe9, 0xce, 0xa0, 0xef, - 0xa0, 0x4b, 0xdc, 0x09, 0x22, 0xd2, 0x5c, 0x88, 0xc4, 0xe0, 0xbc, 0xbf, 0xbf, 0x47, 0xca, 0x86, - 0xdb, 0x26, 0xfa, 0x76, 0x10, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x85, 0x32, 0xcc, 0xe4, 0x75, 0x97, - 0x2e, 0x3a, 0x72, 0xd7, 0x8d, 0x96, 0x28, 0x7b, 0x65, 0x99, 0x8b, 0x6e, 0x59, 0x94, 0x63, 0x85, - 0x41, 0x67, 0x3f, 0x74, 0xb7, 0xe5, 0x1b, 0x73, 0x30, 0x9e, 0xfd, 0x3a, 0x2b, 0xc5, 0x02, 0x4a, - 0xf1, 0x02, 0xe2, 0x84, 0xc2, 0xb8, 0x47, 0x5b, 0x25, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xa5, 0x5d, - 0x03, 0x3d, 0xa4, 0x5d, 0xc6, 0x10, 0x0d, 0x1e, 0xed, 0x10, 0xa1, 0x4f, 0x01, 0x6c, 0xb9, 0x9e, - 0x1b, 0xee, 0x30, 0xea, 0x43, 0x87, 0xa6, 0xae, 0x98, 0xb3, 0x15, 0x45, 0x05, 0x6b, 0x14, 0xd1, - 0x8b, 0x30, 0xaa, 0x36, 0xe0, 0x6a, 0x95, 0x69, 0x3a, 0x35, 0xcb, 0x91, 0xf8, 0x34, 0xaa, 0x62, - 0x1d, 0xcf, 0xfe, 0x4c, 0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0xd5, 0xef, 0xf8, 0x96, 0x8a, - 0xc7, 0xd7, 0xfe, 0x7a, 0x19, 0x26, 0x8d, 0xc6, 0xba, 0x61, 0x1f, 0x67, 0xd6, 0x15, 0x7a, 0x80, - 0x3b, 0x11, 0x11, 0xfb, 0xcf, 0xee, 0xbd, 0x55, 0xf4, 0x43, 0x9e, 0xee, 0x00, 0x5e, 0x1f, 0x7d, - 0x0a, 0x2a, 0x2d, 0x27, 0x64, 0x92, 0x33, 0x22, 0xf6, 0x5d, 0x3f, 0xc4, 0xe2, 0x87, 0x89, 0x13, - 0x46, 0xda, 0xad, 0xc9, 0x69, 0xc7, 0x24, 0xe9, 0x4d, 0x43, 0xf9, 0x13, 0x69, 0x3d, 0xa6, 0x3a, - 0x41, 0x99, 0x98, 0x7d, 0xcc, 0x61, 0xe8, 0x25, 0x18, 0x0b, 0x08, 0x5b, 0x15, 0x4b, 0x94, 0x9b, - 0x63, 0xcb, 0x6c, 0x30, 0x66, 0xfb, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0xfc, 0x36, 0x18, 0x2a, 0x78, - 0x1b, 0x3c, 0x05, 0xc3, 0xec, 0x87, 0x5a, 0x01, 0x6a, 0x36, 0x56, 0x79, 0x31, 0x96, 0xf0, 0xe4, - 0x82, 0x19, 0xe9, 0x6f, 0xc1, 0xd0, 0xd7, 0x87, 0x58, 0xd4, 0x4c, 0xcb, 0x3c, 0xc2, 0x4f, 
0x39, - 0xb1, 0xe4, 0xb1, 0x84, 0xd9, 0xef, 0x87, 0x89, 0xaa, 0x43, 0xda, 0xbe, 0xb7, 0xec, 0x35, 0x3b, - 0xbe, 0xeb, 0x45, 0x68, 0x06, 0x06, 0xd8, 0x25, 0xc2, 0x8f, 0x80, 0x01, 0xda, 0x10, 0x66, 0x25, - 0xf6, 0x36, 0x9c, 0xaa, 0xfa, 0x77, 0xbc, 0x3b, 0x4e, 0xd0, 0x5c, 0xa8, 0xad, 0x6a, 0xef, 0xeb, - 0x75, 0xf9, 0xbe, 0xe3, 0x46, 0x5b, 0x99, 0x47, 0xaf, 0x56, 0x93, 0xb3, 0xb5, 0x2b, 0x6e, 0x8b, - 0xe4, 0x48, 0x41, 0xfe, 0x7a, 0xc9, 0x68, 0x29, 0xc6, 0x57, 0x5a, 0x2d, 0x2b, 0x57, 0xab, 0xf5, - 0x3a, 0x8c, 0x6c, 0xb9, 0xa4, 0xd5, 0xc4, 0x64, 0x4b, 0xac, 0xc4, 0x27, 0xf3, 0xed, 0x50, 0x56, - 0x28, 0xa6, 0x94, 0x7a, 0xf1, 0xd7, 0xe1, 0x8a, 0xa8, 0x8c, 0x15, 0x19, 0xb4, 0x0b, 0x53, 0xf2, - 0xc1, 0x20, 0xa1, 0x62, 0x5d, 0x3e, 0x55, 0xf4, 0x0a, 0x31, 0x89, 0x9f, 0xbc, 0x77, 0x30, 0x37, - 0x85, 0x13, 0x64, 0x70, 0x8a, 0x30, 0x7d, 0x0e, 0xb6, 0xe9, 0x09, 0x3c, 0xc0, 0x86, 0x9f, 0x3d, - 0x07, 0xd9, 0xcb, 0x96, 0x95, 0xda, 0x3f, 0x6e, 0xc1, 0x23, 0xa9, 0x91, 0x11, 0x2f, 0xfc, 0x23, - 0x9e, 0x85, 0xe4, 0x8b, 0xbb, 0xd4, 0xfb, 0xc5, 0x6d, 0xff, 0x3d, 0x0b, 0x4e, 0x2e, 0xb7, 0x3b, - 0xd1, 0x7e, 0xd5, 0x35, 0x55, 0x50, 0x1f, 0x82, 0xa1, 0x36, 0x69, 0xba, 0xdd, 0xb6, 0x98, 0xb9, - 0x39, 0x79, 0x4a, 0xad, 0xb1, 0xd2, 0xfb, 0x07, 0x73, 0xe3, 0xf5, 0xc8, 0x0f, 0x9c, 0x6d, 0xc2, - 0x0b, 0xb0, 0x40, 0x67, 0x67, 0xbd, 0xfb, 0x16, 0xb9, 0xee, 0xb6, 0x5d, 0x69, 0x57, 0x54, 0x28, - 0xb3, 0x9b, 0x97, 0x03, 0x3a, 0xff, 0x7a, 0xd7, 0xf1, 0x22, 0x37, 0xda, 0x17, 0xda, 0x23, 0x49, - 0x04, 0xc7, 0xf4, 0xec, 0xaf, 0x59, 0x30, 0x29, 0xd7, 0xfd, 0x42, 0xb3, 0x19, 0x90, 0x30, 0x44, - 0xb3, 0x50, 0x72, 0x3b, 0xa2, 0x97, 0x20, 0x7a, 0x59, 0x5a, 0xad, 0xe1, 0x92, 0xdb, 0x91, 0x6c, - 0x19, 0x3b, 0x08, 0xcb, 0xa6, 0x22, 0xed, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x84, 0x11, 0xcf, - 0x6f, 0x72, 0xdb, 0x2e, 0x7e, 0xa5, 0xb1, 0x05, 0xb6, 0x2e, 0xca, 0xb0, 0x82, 0xa2, 0x1a, 0x54, - 0xb8, 0xd9, 0x53, 0xbc, 0x68, 0xfb, 0x32, 0x9e, 0x62, 0x5f, 0xb6, 0x21, 0x6b, 0xe2, 0x98, 0x88, - 0xfd, 0x2b, 0x16, 0x8c, 0xc9, 0x2f, 0xeb, 0x93, 0xe7, 0xa4, 0x5b, 0x2b, 0xe6, 0x37, 0xe3, 0xad, - 0x45, 0x79, 0x46, 0x06, 0x31, 0x58, 0xc5, 0xf2, 0xa1, 0x58, 0xc5, 0xcb, 0x30, 0xea, 0x74, 0x3a, - 0x35, 0x93, 0xcf, 0x64, 0x4b, 0x69, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0x63, 0x25, 0x98, 0x90, - 0x5f, 0x50, 0xef, 0x6e, 0x86, 0x24, 0x42, 0x1b, 0x50, 0x71, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x9f, - 0xc8, 0x96, 0x23, 0x18, 0x53, 0x1a, 0x5f, 0xf8, 0x0b, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x16, 0x4c, - 0x7b, 0x7e, 0xc4, 0x0e, 0x7f, 0x05, 0x2f, 0x52, 0xed, 0x24, 0xa9, 0x9f, 0x11, 0xd4, 0xa7, 0xd7, - 0x93, 0x54, 0x70, 0x9a, 0x30, 0x5a, 0x96, 0xb2, 0x99, 0x72, 0xbe, 0x30, 0x40, 0x9f, 0xb8, 0x6c, - 0xd1, 0x8c, 0xfd, 0xcb, 0x16, 0x54, 0x24, 0xda, 0x71, 0x68, 0xf1, 0xd6, 0x60, 0x38, 0x64, 0x93, - 0x20, 0x87, 0xc6, 0x2e, 0xea, 0x38, 0x9f, 0xaf, 0xf8, 0x4e, 0xe3, 0xff, 0x43, 0x2c, 0x69, 0x30, - 0xd1, 0xbc, 0xea, 0xfe, 0x3b, 0x44, 0x34, 0xaf, 0xfa, 0x93, 0x73, 0x29, 0xfd, 0x21, 0xeb, 0xb3, - 0x26, 0xeb, 0xa2, 0xac, 0x57, 0x27, 0x20, 0x5b, 0xee, 0xdd, 0x24, 0xeb, 0x55, 0x63, 0xa5, 0x58, - 0x40, 0xd1, 0x1b, 0x30, 0xd6, 0x90, 0x32, 0xd9, 0x78, 0x87, 0x5f, 0x28, 0xd4, 0x0f, 0x28, 0x55, - 0x12, 0x97, 0x85, 0x2c, 0x69, 0xf5, 0xb1, 0x41, 0xcd, 0x34, 0x23, 0x28, 0xf7, 0x32, 0x23, 0x88, - 0xe9, 0xe6, 0x2b, 0xd5, 0x7f, 0xc2, 0x82, 0x21, 0x2e, 0x8b, 0xeb, 0x4f, 0x14, 0xaa, 0x69, 0xd6, - 0xe2, 0xb1, 0xbb, 0x45, 0x0b, 0x85, 0xa6, 0x0c, 0xad, 0x41, 0x85, 0xfd, 0x60, 0xb2, 0xc4, 0x72, - 0xbe, 0xd5, 0x3d, 0x6f, 0x55, 0xef, 0xe0, 0x2d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x1f, 0x2d, 0xd3, - 0xd3, 0x2d, 0x46, 
-	0x35, 0x2e, 0x7d, 0xeb, 0xe1, 0x5d, 0xfa, 0xa5, 0x87, 0x75, 0xe9, 0x6f, 0xc3,
-	// [... interior of the old gzipped FileDescriptorProto byte array elided ...]
-	0x05, 0x08, 0x01, 0x00,
+	// 14822 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x24, 0xc9,
+	// [... interior of the new gzipped FileDescriptorProto byte array elided ...]
+	0x23, 0x91, 0xbb, 0x43, 0xfc, 0x56, 0x54, 0x25, 0x35, 0xdf,
0x63, 0x77, 0x6d, 0xeb, 0x52, 0xef, + 0x2c, 0xba, 0xbf, 0x5f, 0x1e, 0x59, 0x33, 0x30, 0x38, 0x41, 0x69, 0xff, 0x21, 0x1d, 0x71, 0x7f, + 0xa7, 0xe9, 0x7b, 0xc4, 0x8b, 0xe6, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0xaf, 0x40, 0x4f, 0x44, 0x47, + 0x90, 0x7f, 0xf9, 0x45, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, + 0x65, 0x65, 0xd0, 0x87, 0xa0, 0x2f, 0x8c, 0x9c, 0xa8, 0x15, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, + 0x95, 0x41, 0x0f, 0xf6, 0xcb, 0xa3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0xfd, 0x3b, + 0x24, 0x0c, 0x9d, 0x4d, 0x79, 0x7e, 0x8f, 0x8a, 0xb2, 0xfd, 0x2b, 0x1c, 0x8c, 0x25, 0x1e, 0x3d, + 0x06, 0xbd, 0x24, 0x08, 0xfc, 0x40, 0xec, 0x2a, 0xc3, 0x82, 0xb0, 0x77, 0x81, 0x02, 0x31, 0xc7, + 0xd9, 0xff, 0xd2, 0x82, 0x51, 0xd5, 0x56, 0x5e, 0xd7, 0x31, 0xdc, 0x9b, 0xde, 0x04, 0xa8, 0xc9, + 0x0f, 0x0c, 0xd9, 0x79, 0x37, 0xf8, 0xfc, 0xc5, 0x4c, 0xd1, 0x22, 0xd5, 0x8d, 0x31, 0x67, 0x05, + 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x35, 0x0b, 0x26, 0x12, 0x5f, 0xb4, 0xec, 0x86, 0x11, 0x7a, 0x2b, + 0xf5, 0x55, 0xd3, 0xdd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8, + 0x1a, 0xf4, 0xba, 0x11, 0xd9, 0x91, 0x1f, 0xf3, 0x58, 0xdb, 0x8f, 0xe1, 0xad, 0x8a, 0x47, 0x64, + 0x89, 0x96, 0xc4, 0x9c, 0x81, 0xfd, 0x9b, 0x45, 0x28, 0xf1, 0x69, 0xbb, 0xe2, 0x34, 0x8f, 0x61, + 0x2c, 0x9e, 0x81, 0x92, 0xbb, 0xb3, 0xd3, 0x8a, 0x9c, 0x75, 0x71, 0x00, 0x0d, 0xf0, 0xcd, 0x60, + 0x49, 0x02, 0x71, 0x8c, 0x47, 0x4b, 0xd0, 0xc3, 0x9a, 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, + 0x6d, 0x9f, 0x9e, 0x77, 0x22, 0x87, 0xcb, 0x7e, 0xea, 0xe4, 0xa3, 0x20, 0xcc, 0x58, 0x20, 0x07, + 0x60, 0xdd, 0xf5, 0x9c, 0x60, 0x8f, 0xc2, 0x26, 0x8b, 0x8c, 0xe1, 0x73, 0xed, 0x19, 0xce, 0x2a, + 0x7a, 0xce, 0x56, 0x7d, 0x58, 0x8c, 0xc0, 0x1a, 0xd3, 0xa9, 0x0f, 0x42, 0x49, 0x11, 0x1f, 0x46, + 0x84, 0x9b, 0xfa, 0x08, 0x8c, 0x26, 0xea, 0xea, 0x54, 0x7c, 0x48, 0x97, 0x00, 0x7f, 0x85, 0x6d, + 0x19, 0xa2, 0xd5, 0x0b, 0xde, 0xae, 0xd8, 0x39, 0xef, 0xc1, 0x89, 0x46, 0xc6, 0xde, 0x2b, 0xc6, + 0xb5, 0xfb, 0xbd, 0xfa, 0xac, 0xf8, 0xec, 0x13, 0x59, 0x58, 0x9c, 0x59, 0x07, 0x95, 0x6a, 0xfc, + 0x26, 0x5d, 0x20, 0x4e, 0x43, 0xbf, 0x20, 0xdc, 0x10, 0x30, 0xac, 0xb0, 0x74, 0xbf, 0x3b, 0xa1, + 0x1a, 0x7f, 0x9d, 0xec, 0x55, 0x49, 0x83, 0xd4, 0x22, 0x3f, 0xf8, 0xa6, 0x36, 0xff, 0x1c, 0xef, + 0x7d, 0xbe, 0x5d, 0x0e, 0x0a, 0x06, 0xc5, 0xeb, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0xb6, + 0x5f, 0xf7, 0x15, 0x0b, 0x86, 0xd5, 0xd7, 0x1d, 0xc3, 0xbe, 0x30, 0x6b, 0xee, 0x0b, 0xe7, 0xda, + 0x4e, 0xf0, 0x9c, 0x1d, 0xe1, 0x6b, 0x05, 0x38, 0xa3, 0x68, 0xe8, 0x6d, 0x86, 0xff, 0x11, 0xb3, + 0xea, 0x32, 0x94, 0x3c, 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, + 0xea, 0xc5, 0xc7, 0xec, 0x90, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x59, 0x28, 0xb6, 0xdc, 0xba, 0x38, + 0x60, 0xde, 0x2f, 0x7b, 0xfb, 0xe6, 0xd2, 0xfc, 0xc1, 0x7e, 0xf9, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, + 0xd9, 0xc2, 0xe9, 0x9b, 0x4b, 0xf3, 0x98, 0x16, 0x46, 0x33, 0x30, 0x2a, 0x4f, 0xe8, 0x5b, 0x54, + 0x40, 0xf4, 0x3d, 0x71, 0x0e, 0x29, 0xad, 0x35, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0xe6, 0x61, 0x6c, + 0xbb, 0xb5, 0x4e, 0x1a, 0x24, 0xe2, 0x1f, 0x7c, 0x9d, 0x70, 0x9d, 0x6e, 0x29, 0xbe, 0x4b, 0x5e, + 0x4f, 0xe0, 0x71, 0xaa, 0x84, 0xfd, 0xd7, 0xec, 0x3c, 0x10, 0xbd, 0x57, 0x09, 0x7c, 0x3a, 0xb1, + 0x28, 0xf7, 0x6f, 0xe6, 0x74, 0xee, 0x66, 0x56, 0x5c, 0x27, 0x7b, 0x6b, 0x3e, 0xbd, 0x4b, 0x64, + 0xcf, 0x0a, 0x63, 0xce, 0xf7, 0xb4, 0x9d, 0xf3, 0xbf, 0x50, 0x80, 0x93, 0xaa, 0x07, 0x0c, 0xb1, + 0xf5, 0x5b, 0xbd, 0x0f, 0xae, 0xc0, 0x60, 0x9d, 0x6c, 0x38, 0xad, 0x46, 0xa4, 0x0c, 
0x0c, 0xbd, + 0xdc, 0xc8, 0x34, 0x1f, 0x83, 0xb1, 0x4e, 0x73, 0x88, 0x6e, 0xfb, 0xf9, 0x61, 0x76, 0x10, 0x47, + 0x0e, 0x9d, 0xe3, 0x6a, 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x75, 0x77, 0xa8, 0x60, 0x56, + 0x30, 0xe5, 0xad, 0x25, 0x0a, 0xc4, 0x1c, 0x87, 0x9e, 0x80, 0xfe, 0x9a, 0xbf, 0xb3, 0xe3, 0x78, + 0x75, 0x76, 0xe4, 0x95, 0x66, 0x07, 0xa9, 0xec, 0x36, 0xc7, 0x41, 0x58, 0xe2, 0xd0, 0x59, 0xe8, + 0x71, 0x82, 0x4d, 0xae, 0x75, 0x29, 0xcd, 0x0e, 0xd0, 0x9a, 0x66, 0x82, 0xcd, 0x10, 0x33, 0x28, + 0xbd, 0x34, 0xde, 0xf1, 0x83, 0x6d, 0xd7, 0xdb, 0x9c, 0x77, 0x03, 0xb1, 0x24, 0xd4, 0x59, 0x78, + 0x5b, 0x61, 0xb0, 0x46, 0x85, 0x16, 0xa1, 0xb7, 0xe9, 0x07, 0x51, 0x38, 0xd9, 0xc7, 0xba, 0xfb, + 0xd1, 0x9c, 0x8d, 0x88, 0x7f, 0x6d, 0xc5, 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xe2, + 0x68, 0x19, 0xfa, 0x89, 0xb7, 0xbb, 0x18, 0xf8, 0x3b, 0x93, 0x13, 0xf9, 0x9c, 0x16, 0x38, 0x09, + 0x9f, 0x66, 0xb1, 0x8c, 0x2a, 0xc0, 0x58, 0xb2, 0x40, 0x1f, 0x82, 0x22, 0xf1, 0x76, 0x27, 0xfb, + 0x19, 0xa7, 0xa9, 0x1c, 0x4e, 0xb7, 0x9c, 0x20, 0xde, 0xf3, 0x17, 0xbc, 0x5d, 0x4c, 0xcb, 0xa0, + 0x8f, 0x43, 0x49, 0x6e, 0x18, 0xa1, 0x50, 0x67, 0x66, 0x4e, 0x58, 0xb9, 0xcd, 0x60, 0xf2, 0x76, + 0xcb, 0x0d, 0xc8, 0x0e, 0xf1, 0xa2, 0x30, 0xde, 0x21, 0x25, 0x36, 0xc4, 0x31, 0x37, 0x54, 0x83, + 0xa1, 0x80, 0x84, 0xee, 0x3d, 0x52, 0xf1, 0x1b, 0x6e, 0x6d, 0x6f, 0xf2, 0x34, 0x6b, 0xde, 0x53, + 0x6d, 0xbb, 0x0c, 0x6b, 0x05, 0x62, 0x75, 0xbb, 0x0e, 0xc5, 0x06, 0x53, 0xf4, 0x06, 0x0c, 0x07, + 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xb5, 0x4c, 0x2a, 0xf3, 0xd8, 0x30, 0xd6, 0x11, 0xfc, 0x3a, 0x11, + 0x57, 0x13, 0x63, 0xb0, 0xc9, 0x01, 0x7d, 0x5c, 0xea, 0xfe, 0x57, 0xfc, 0x96, 0x17, 0x85, 0x93, + 0x25, 0xd6, 0xee, 0x4c, 0xab, 0xec, 0xad, 0x98, 0x2e, 0x69, 0x1c, 0xe0, 0x85, 0xb1, 0xc1, 0x0a, + 0x7d, 0x12, 0x86, 0xf9, 0x7f, 0x6e, 0xdb, 0x0c, 0x27, 0x4f, 0x32, 0xde, 0x17, 0xf2, 0x79, 0x73, + 0xc2, 0xd9, 0x93, 0x82, 0xf9, 0xb0, 0x0e, 0x0d, 0xb1, 0xc9, 0x0d, 0x61, 0x18, 0x6e, 0xb8, 0xbb, + 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0x75, 0x22, 0x54, 0xb5, 0x67, 0xb2, 0x6d, 0xa1, 0xfe, 0x3a, + 0x99, 0x1d, 0xa7, 0x3c, 0x97, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, 0x08, 0xbd, 0x1b, 0xbb, + 0x31, 0xd3, 0xc1, 0x4e, 0x4c, 0xd9, 0x7d, 0x10, 0x1b, 0x85, 0x70, 0x82, 0x09, 0xba, 0x01, 0x43, + 0xac, 0xcf, 0x5b, 0x4d, 0xce, 0xf4, 0x54, 0x27, 0xa6, 0xcc, 0x94, 0x5e, 0xd5, 0x8a, 0x60, 0x83, + 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x83, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, 0x39, 0xc4, 0xb8, 0x65, + 0x6e, 0x86, 0xcb, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, 0x5b, 0x70, 0x2a, 0x22, + 0xc1, 0x8e, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42, 0x66, 0xa2, 0x1e, 0x66, 0xb3, 0xeb, 0xbc, + 0x18, 0x8d, 0x53, 0x6b, 0x99, 0x54, 0x38, 0xa7, 0x34, 0xba, 0x0b, 0x93, 0x19, 0x18, 0x3e, 0x6f, + 0x4f, 0x30, 0xce, 0x1f, 0x16, 0x9c, 0x27, 0xd7, 0x72, 0xe8, 0x0e, 0xda, 0xe0, 0x70, 0x2e, 0x77, + 0x74, 0x03, 0x46, 0xd9, 0xce, 0x59, 0x69, 0x35, 0x1a, 0xa2, 0xc2, 0x11, 0x56, 0xe1, 0x13, 0x52, + 0x8e, 0x58, 0x32, 0xd1, 0x07, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, 0xb4, 0xce, 0xac, 0xa1, + 0xad, 0xc0, 0x8d, 0xf6, 0xe8, 0xaa, 0x22, 0x77, 0xa3, 0xc9, 0xd1, 0xb6, 0x9a, 0x21, 0x9d, 0x54, + 0x99, 0x4c, 0x75, 0x20, 0x4e, 0x32, 0xa4, 0x47, 0x41, 0x18, 0xd5, 0x5d, 0x6f, 0x72, 0x8c, 0xdf, + 0xa7, 0xe4, 0x4e, 0x5a, 0xa5, 0x40, 0xcc, 0x71, 0xcc, 0x12, 0x4a, 0x7f, 0xdc, 0xa0, 0x27, 0xee, + 0x38, 0x23, 0x8c, 0x2d, 0xa1, 0x12, 0x81, 0x63, 0x1a, 0x2a, 0x04, 0x47, 0xd1, 0xde, 0x24, 0x62, + 0xa4, 0x6a, 0x43, 0x5c, 0x5b, 0xfb, 0x38, 0xa6, 0x70, 0x7b, 0x1d, 0x46, 0xd4, 0x36, 0xc1, 0xfa, + 0x04, 0x95, 
0xa1, 0x97, 0x89, 0x7d, 0x42, 0x8f, 0x59, 0xa2, 0x4d, 0x60, 0x22, 0x21, 0xe6, 0x70, + 0xd6, 0x04, 0xf7, 0x1e, 0x99, 0xdd, 0x8b, 0x08, 0xd7, 0x45, 0x14, 0xb5, 0x26, 0x48, 0x04, 0x8e, + 0x69, 0xec, 0xff, 0xc9, 0xc5, 0xe7, 0xf8, 0x94, 0xe8, 0xe2, 0x5c, 0x7c, 0x16, 0x06, 0xb6, 0xfc, + 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xde, 0x58, 0x60, 0xbe, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61, + 0xb8, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x03, + 0xcc, 0xbb, 0xa7, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25, 0x93, 0x81, 0x8a, 0x80, 0x1f, 0x68, 0xbf, + 0xb1, 0xa2, 0x46, 0x17, 0xa1, 0x8f, 0x36, 0x61, 0xa9, 0x22, 0x8e, 0x53, 0xa5, 0x92, 0xbb, 0xc6, + 0xa0, 0x58, 0x60, 0xed, 0x5f, 0xb3, 0x98, 0x2c, 0x95, 0xde, 0xf3, 0xd1, 0x35, 0x76, 0x68, 0xb0, + 0x13, 0x44, 0x53, 0x89, 0x3d, 0xae, 0x9d, 0x04, 0x0a, 0x77, 0x90, 0xf8, 0x8f, 0x8d, 0x92, 0xe8, + 0xcd, 0xe4, 0xc9, 0xc0, 0x05, 0x8a, 0x17, 0x65, 0x17, 0x24, 0x4f, 0x87, 0x47, 0xe2, 0x23, 0x8e, + 0xb6, 0xa7, 0xdd, 0x11, 0x61, 0xff, 0x5f, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, + 0xfd, 0x77, 0x1c, 0x37, 0x72, 0xbd, 0x4d, 0x21, 0xf7, 0xb5, 0x3f, 0xe8, 0x58, 0xa1, 0xdb, 0xbc, + 0x00, 0x97, 0x5e, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xcb, 0xf3, 0x28, 0xc7, 0x42, 0xb7, + 0x1c, 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x82, 0xd4, + 0x85, 0x57, 0xd0, 0xb3, 0x9d, 0x99, 0xae, 0xa9, 0x32, 0xb3, 0x23, 0x54, 0x36, 0x8a, 0xff, 0x63, + 0x8d, 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x82, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, + 0x26, 0x12, 0x9d, 0xf3, 0x74, 0x77, 0x97, 0xc3, 0x35, 0x77, 0x87, 0xe8, 0xcb, 0x59, 0x30, 0xc1, + 0x31, 0x3f, 0xfb, 0x97, 0x8a, 0x30, 0x99, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x5d, 0x37, 0x9a, 0xa3, + 0x62, 0xad, 0x65, 0x2e, 0x9a, 0x05, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x94, 0x77, + 0xfb, 0xde, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, 0x0a, 0xb7, 0x33, + 0x6d, 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0x6b, 0x19, 0x7b, 0x3a, 0x68, 0x19, 0x8d, 0x2e, 0xea, + 0x3d, 0xda, 0x2e, 0x42, 0x9f, 0x02, 0xd8, 0x70, 0x3d, 0x37, 0xdc, 0x62, 0xdc, 0xfb, 0x0e, 0xcd, + 0x5d, 0x09, 0xc5, 0x8b, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x97, 0x60, 0x50, 0x6d, 0x20, 0x4b, 0xf3, + 0xcc, 0x06, 0xaf, 0xf9, 0x34, 0xc5, 0xbb, 0xe9, 0x3c, 0xd6, 0xe9, 0xec, 0xcf, 0x24, 0xe7, 0x8b, + 0x58, 0x01, 0x5a, 0xff, 0x5a, 0xdd, 0xf6, 0x6f, 0xa1, 0x7d, 0xff, 0xda, 0xdf, 0xe8, 0x83, 0x51, + 0xa3, 0xb2, 0x56, 0xd8, 0xc5, 0x9e, 0x7b, 0x95, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0x3b, + 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82, 0x52, 0xc3, 0x09, 0x99, 0xc6, + 0x92, 0x88, 0x75, 0xd7, 0x0d, 0xb3, 0xf8, 0x42, 0xe8, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, + 0x4b, 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0xfa, 0x35, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, + 0x7a, 0x99, 0x6d, 0xad, 0x74, 0x56, 0xcc, 0x51, 0x69, 0x94, 0x4d, 0xb3, 0x5e, 0x43, 0xc8, 0x56, + 0x38, 0x6c, 0x50, 0xc6, 0x77, 0xb2, 0xbe, 0x36, 0x77, 0xb2, 0xa7, 0xa0, 0x9f, 0xfd, 0x50, 0x33, + 0x40, 0x8d, 0xc6, 0x12, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0x81, 0xee, 0x26, 0x0c, 0xbd, 0xf5, + 0x89, 0x49, 0xcd, 0xfc, 0x1f, 0x06, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0x33, 0x16, + 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44, 0xed, 0x57, 0x3b, 0x76, 0x7b, + 0x2b, 0x9c, 0x9e, 0x49, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, + 0x96, 0xdd, 0x30, 0xfa, 0xfc, 0x1f, 0x25, 0x0e, 0xa7, 0x8c, 0x26, 0xa1, 0x9b, 0xfa, 0xe5, 0x6b, + 0xf0, 0x90, 0x97, 0xaf, 0xe1, 0xbc, 
0x8b, 0xd7, 0x54, 0x0b, 0x4e, 0xe7, 0x7c, 0x41, 0x86, 0xfe, + 0x75, 0x5e, 0xd7, 0xbf, 0x76, 0xd0, 0xda, 0x4d, 0xcb, 0x3a, 0xa6, 0xdf, 0x68, 0x39, 0x5e, 0xe4, + 0x46, 0x7b, 0xba, 0xbe, 0xf6, 0x69, 0x18, 0x99, 0x77, 0xc8, 0x8e, 0xef, 0x2d, 0x78, 0xf5, 0xa6, + 0xef, 0x7a, 0x11, 0x9a, 0x84, 0x1e, 0x26, 0x7c, 0xf0, 0xad, 0xb7, 0x87, 0xf6, 0x1e, 0x66, 0x10, + 0x7b, 0x13, 0x4e, 0xce, 0xfb, 0x77, 0xbc, 0x3b, 0x4e, 0x50, 0x9f, 0xa9, 0x2c, 0x69, 0xfa, 0xa4, + 0x55, 0xa9, 0xcf, 0xb0, 0xf2, 0x6f, 0x8b, 0x5a, 0x49, 0x7e, 0x1d, 0x5a, 0x74, 0x1b, 0x24, 0x47, + 0xeb, 0xf7, 0xff, 0x16, 0x8c, 0x9a, 0x62, 0x7a, 0x65, 0x77, 0xb6, 0x72, 0xed, 0xce, 0x6f, 0xc0, + 0xc0, 0x86, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x44, 0xef, 0x3c, 0x99, 0xef, 0x99, 0xb6, 0x48, 0x29, + 0xa5, 0x96, 0x97, 0x6b, 0x43, 0x16, 0x45, 0x61, 0xac, 0xd8, 0xa0, 0x6d, 0x18, 0x93, 0x7d, 0x28, + 0xb1, 0x62, 0x3f, 0x78, 0xaa, 0xdd, 0xc0, 0x9b, 0xcc, 0x4f, 0xdc, 0xdf, 0x2f, 0x8f, 0xe1, 0x04, + 0x1b, 0x9c, 0x62, 0x8c, 0xce, 0x42, 0xcf, 0x0e, 0x3d, 0xf9, 0x7a, 0x58, 0xf7, 0x33, 0xf5, 0x07, + 0xd3, 0xe4, 0x30, 0xa8, 0xfd, 0x63, 0x16, 0x9c, 0x4e, 0xf5, 0x8c, 0xd0, 0x68, 0x1d, 0xf1, 0x28, + 0x24, 0x35, 0x4c, 0x85, 0xce, 0x1a, 0x26, 0xfb, 0x6f, 0x59, 0x70, 0x62, 0x61, 0xa7, 0x19, 0xed, + 0xcd, 0xbb, 0xa6, 0x91, 0xf8, 0x83, 0xd0, 0xb7, 0x43, 0xea, 0x6e, 0x6b, 0x47, 0x8c, 0x5c, 0x59, + 0x9e, 0x0e, 0x2b, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xae, 0x46, 0x7e, 0xe0, 0x6c, 0x12, 0x0e, 0xc0, + 0x82, 0x9c, 0x9d, 0xb1, 0xee, 0x3d, 0xb2, 0xec, 0xee, 0xb8, 0xd1, 0x83, 0xcd, 0x76, 0x61, 0xdf, + 0x95, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xa3, 0x72, 0xde, 0xcf, 0xd4, 0xeb, 0x01, 0x09, + 0x43, 0x34, 0x05, 0x05, 0xb7, 0x29, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0xa5, 0x0a, 0x2e, 0xb8, 0x4d, + 0x29, 0xce, 0xb3, 0x03, 0xa8, 0x68, 0x9a, 0xba, 0xaf, 0x09, 0x38, 0x56, 0x14, 0xe8, 0x12, 0x0c, + 0x78, 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x55, 0xc0, 0xb0, 0xc2, 0xa2, 0x0a, + 0x94, 0xb8, 0x23, 0x64, 0x3c, 0x69, 0xbb, 0x72, 0xa7, 0x64, 0x5f, 0xb6, 0x26, 0x4b, 0xe2, 0x98, + 0x89, 0xfd, 0x1b, 0x16, 0x0c, 0xc9, 0x2f, 0xeb, 0xf2, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, + 0x97, 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x05, 0x06, 0x9d, + 0x66, 0xb3, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x89, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xb4, 0x00, + 0x23, 0xf2, 0x0b, 0xaa, 0xad, 0xf5, 0x90, 0x44, 0x68, 0x0d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, + 0xf9, 0x63, 0xd9, 0x7a, 0x33, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x46, 0x96, 0xc6, 0x31, 0x23, 0xd4, + 0x80, 0x71, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0xed, 0x4c, 0x99, 0x49, 0xee, 0x67, 0x04, 0xf7, + 0xf1, 0xd5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x16, 0xa4, 0x2e, 0xb2, 0x98, 0xaf, 0x44, 0xd2, 0x07, + 0x2e, 0x5b, 0x15, 0x69, 0xff, 0xaa, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x56, 0xeb, 0x15, 0xe8, 0x0f, + 0xd9, 0x20, 0xc8, 0xae, 0xb1, 0xdb, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, + 0x3c, 0x98, 0x29, 0x4a, 0x35, 0xff, 0x5d, 0x62, 0x8a, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x94, + 0xb5, 0x59, 0xd3, 0xed, 0x52, 0x91, 0xb7, 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0x52, 0xe4, 0xad, 0x30, + 0x28, 0x16, 0x58, 0xf4, 0x16, 0x0c, 0xd5, 0xa4, 0x0d, 0x22, 0x5e, 0xe1, 0x17, 0xdb, 0xda, 0xc3, + 0x94, 0xe9, 0x94, 0xeb, 0xd0, 0xe6, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x3a, 0xfa, 0x14, 0x3b, 0x39, + 0xfa, 0xc4, 0x7c, 0xf3, 0xdd, 0x5e, 0x7e, 0xdc, 0x82, 0x3e, 0xae, 0x7b, 0xee, 0x4e, 0xf5, 0xaf, + 0x59, 0x92, 0xe3, 0xbe, 0xbb, 0x45, 0x81, 0x42, 0xd2, 0x40, 0x2b, 0x50, 0x62, 0x3f, 0x98, 0xee, + 0xbc, 0x98, 0xff, 0x0e, 0x87, 0xd7, 0xaa, 0x37, 0xf0, 0x96, 
0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, + 0x14, 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xf0, 0x0e, 0xfd, 0xc2, 0xc3, 0x3a, 0xf4, + 0x37, 0x61, 0xb4, 0xa6, 0xd9, 0x9d, 0xe3, 0x91, 0xbc, 0xd4, 0x76, 0x92, 0x68, 0x26, 0x6a, 0xae, + 0x9d, 0x9b, 0x33, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x09, 0x18, 0xe2, 0xe3, 0x2c, 0x6a, 0xe1, 0xbe, + 0x52, 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0x97, + 0x16, 0xa0, 0x85, 0xe6, 0x16, 0xd9, 0x21, 0x81, 0xd3, 0x88, 0xcd, 0x47, 0x3f, 0x68, 0xc1, 0x24, + 0x49, 0x81, 0xe7, 0xfc, 0x9d, 0x1d, 0x71, 0x59, 0xcc, 0xd1, 0x67, 0x2c, 0xe4, 0x94, 0x51, 0x0f, + 0x95, 0x26, 0xf3, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x05, 0x26, 0xf8, 0x29, 0xa9, 0x10, 0x9a, 0xdf, + 0xd5, 0x23, 0x82, 0xf1, 0xc4, 0x5a, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x75, 0x18, 0x72, 0x5b, + 0xf1, 0x9e, 0xdd, 0xec, 0x3d, 0xbb, 0xd9, 0x7b, 0x76, 0xb3, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, + 0xde, 0xb3, 0x9b, 0xbd, 0x4b, 0xed, 0x66, 0xff, 0xb7, 0x05, 0x27, 0xd5, 0xf1, 0x65, 0x5c, 0xd8, + 0x3f, 0x0b, 0x13, 0x7c, 0xb9, 0x19, 0x3e, 0xc6, 0xe2, 0xb8, 0xbe, 0x92, 0x39, 0x73, 0x13, 0xbe, + 0xf0, 0x46, 0x41, 0xfe, 0xa8, 0x28, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x69, 0x00, 0x7a, 0x17, + 0x76, 0x89, 0x17, 0x1d, 0xc3, 0xd5, 0xa6, 0x06, 0x23, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, + 0xe3, 0x0f, 0x73, 0x03, 0x3f, 0x25, 0x58, 0x8f, 0x2c, 0x19, 0x2c, 0x70, 0x82, 0xe5, 0xc3, 0xb0, + 0x3e, 0x5c, 0x85, 0x3e, 0x7e, 0xf8, 0x08, 0xd3, 0x43, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x1c, 0xa9, + 0xb1, 0x65, 0x84, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6c, 0xb8, 0x41, 0x18, 0xad, 0xb9, + 0x3b, 0xf4, 0x68, 0xd8, 0x69, 0x3e, 0x80, 0xb5, 0x41, 0xf5, 0xc3, 0xa2, 0xc1, 0x09, 0x27, 0x38, + 0xa3, 0x4d, 0x18, 0x6e, 0x38, 0x7a, 0x55, 0xfd, 0x87, 0xae, 0x4a, 0x9d, 0x0e, 0xcb, 0x3a, 0x23, + 0x6c, 0xf2, 0xa5, 0xcb, 0xa9, 0xc6, 0x14, 0xe6, 0x03, 0x4c, 0x9d, 0xa1, 0x96, 0x13, 0xd7, 0x94, + 0x73, 0x1c, 0x15, 0xd0, 0x98, 0x23, 0x7b, 0xc9, 0x14, 0xd0, 0x34, 0x77, 0xf5, 0x4f, 0x43, 0x89, + 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xb9, 0xbb, 0xb6, 0xae, 0xb8, 0xb5, 0xc0, 0x37, 0xed, + 0x3c, 0x0b, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x39, 0xe8, 0x0b, 0x49, 0xe0, 0x2a, 0x5d, 0x72, 0x9b, + 0x61, 0x64, 0x64, 0xfc, 0xd5, 0x1a, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c, 0xa6, 0x8a, 0x65, + 0x87, 0x81, 0x36, 0xbd, 0x66, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x1d, 0xfa, 0x03, 0xd2, 0x60, 0x86, + 0xc4, 0xe1, 0xee, 0x27, 0x39, 0xb7, 0x4b, 0xf2, 0x72, 0x58, 0x32, 0x40, 0xd7, 0x01, 0x05, 0x84, + 0x0a, 0x78, 0xae, 0xb7, 0xa9, 0xdc, 0xbb, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c, 0x53, 0xc8, 0x07, + 0x8b, 0x38, 0xa3, 0x18, 0xba, 0x0a, 0xe3, 0x0a, 0xba, 0xe4, 0x85, 0x91, 0x43, 0x37, 0xb8, 0x51, + 0xc6, 0x4b, 0xe9, 0x57, 0x70, 0x92, 0x00, 0xa7, 0xcb, 0xd8, 0x3f, 0x67, 0x01, 0xef, 0xe7, 0x63, + 0xd0, 0x2a, 0xbc, 0x66, 0x6a, 0x15, 0xce, 0xe4, 0x8e, 0x5c, 0x8e, 0x46, 0xe1, 0xe7, 0x2c, 0x18, + 0xd4, 0x46, 0x36, 0x9e, 0xb3, 0x56, 0x9b, 0x39, 0xdb, 0x82, 0x31, 0x3a, 0xd3, 0x6f, 0xac, 0x87, + 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0x0f, 0x36, 0x31, 0x95, 0x2b, 0xe9, 0x72, 0x82, 0x21, + 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0x79, 0xde, 0xd6, 0xd4, 0x98, 0x27, 0x3c, 0x6f, 0xd5, + 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x2d, 0x3f, 0x8c, 0x92, 0x9e, 0xb7, 0xd7, 0xfc, 0x30, 0xc2, + 0x0c, 0x63, 0xbf, 0x00, 0xb0, 0x70, 0x97, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d, 0x56, 0xfe, 0xa5, + 0xc7, 0xfe, 0x3d, 0x0b, 0x46, 0x16, 0xe7, 0x8c, 0x93, 0x6b, 0x1a, 0x80, 0xdf, 0xd4, 0x6e, 0xdf, + 0x5e, 0x95, 0xee, 0x1f, 0xdc, 0x02, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x67, 0xa0, 0xd8, 
0x68, 0x79, + 0x42, 0xed, 0xd9, 0x4f, 0x8f, 0xc7, 0xe5, 0x96, 0x87, 0x29, 0x4c, 0x7b, 0xac, 0x54, 0xec, 0xfa, + 0xb1, 0x52, 0xc7, 0x20, 0x25, 0xa8, 0x0c, 0xbd, 0x77, 0xee, 0xb8, 0x75, 0xfe, 0x14, 0x5c, 0xb8, + 0xa6, 0xdc, 0xbe, 0xbd, 0x34, 0x1f, 0x62, 0x0e, 0xb7, 0xbf, 0x58, 0x84, 0xa9, 0xc5, 0x06, 0xb9, + 0xfb, 0x0e, 0x9f, 0xc3, 0x77, 0xfb, 0xd4, 0xea, 0x70, 0x0a, 0xa4, 0xc3, 0x3e, 0xa7, 0xeb, 0xdc, + 0x1f, 0x1b, 0xd0, 0xcf, 0x1d, 0x4f, 0xe5, 0xe3, 0xf8, 0x4c, 0x73, 0x5f, 0x7e, 0x87, 0x4c, 0x73, + 0x07, 0x56, 0x61, 0xee, 0x53, 0x07, 0xa6, 0x80, 0x62, 0xc9, 0x7c, 0xea, 0x15, 0x18, 0xd2, 0x29, + 0x0f, 0xf5, 0xb0, 0xf5, 0xbb, 0x8b, 0x30, 0x46, 0x5b, 0xf0, 0x50, 0x07, 0xe2, 0x66, 0x7a, 0x20, + 0x8e, 0xfa, 0x71, 0x63, 0xe7, 0xd1, 0x78, 0x2b, 0x39, 0x1a, 0x57, 0xf2, 0x46, 0xe3, 0xb8, 0xc7, + 0xe0, 0x7b, 0x2c, 0x98, 0x58, 0x6c, 0xf8, 0xb5, 0xed, 0xc4, 0x03, 0xc4, 0x97, 0x60, 0x90, 0x6e, + 0xc7, 0xa1, 0x11, 0x8b, 0xc3, 0x88, 0xce, 0x22, 0x50, 0x58, 0xa7, 0xd3, 0x8a, 0xdd, 0xbc, 0xb9, + 0x34, 0x9f, 0x15, 0xd4, 0x45, 0xa0, 0xb0, 0x4e, 0x67, 0xff, 0x8e, 0x05, 0xe7, 0xae, 0xce, 0x2d, + 0xc4, 0x53, 0x31, 0x15, 0x57, 0xe6, 0x22, 0xf4, 0x35, 0xeb, 0x5a, 0x53, 0x62, 0xb5, 0xf0, 0x3c, + 0x6b, 0x85, 0xc0, 0xbe, 0x5b, 0x62, 0x26, 0xdd, 0x04, 0xb8, 0x8a, 0x2b, 0x73, 0x62, 0xdf, 0x95, + 0x56, 0x20, 0x2b, 0xd7, 0x0a, 0xf4, 0x04, 0xf4, 0xd3, 0x73, 0xc1, 0xad, 0xc9, 0x76, 0x73, 0x83, + 0x3e, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xd6, 0x82, 0x89, 0xab, 0x6e, 0x44, 0x0f, 0xed, 0x64, 0xe0, + 0x14, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0xc0, 0x29, 0x58, 0x61, 0xb0, 0x46, 0xc5, + 0x3f, 0x68, 0xd7, 0x65, 0x2f, 0x29, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf, + 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0x35, 0x2f, 0x11, 0x38, 0xa6, 0xb1, + 0xff, 0xdc, 0x82, 0xf2, 0xd5, 0x46, 0x2b, 0x8c, 0x48, 0xb0, 0x11, 0xe6, 0x6c, 0xba, 0x2f, 0x40, + 0x89, 0x48, 0x03, 0x81, 0x7c, 0xf2, 0x29, 0x05, 0x51, 0x65, 0x39, 0xe0, 0xf1, 0x5b, 0x14, 0x5d, + 0x17, 0xaf, 0xa4, 0x0f, 0xf7, 0xcc, 0x75, 0x11, 0x10, 0xd1, 0xeb, 0xd2, 0x03, 0xda, 0xb0, 0xc8, + 0x18, 0x0b, 0x29, 0x2c, 0xce, 0x28, 0x61, 0xff, 0x98, 0x05, 0x27, 0xd5, 0x07, 0xbf, 0xeb, 0x3e, + 0xd3, 0xfe, 0x6a, 0x01, 0x86, 0xaf, 0xad, 0xad, 0x55, 0xae, 0x92, 0x48, 0x9b, 0x95, 0xed, 0xcd, + 0xfe, 0x58, 0xb3, 0x5e, 0xb6, 0xbb, 0x23, 0xb6, 0x22, 0xb7, 0x31, 0xcd, 0xe3, 0xa2, 0x4d, 0x2f, + 0x79, 0xd1, 0x8d, 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x99, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93, + 0x59, 0xd0, 0x0b, 0xd0, 0xc7, 0x02, 0xb3, 0xc9, 0x41, 0x78, 0x44, 0x5d, 0xb1, 0x18, 0xf4, 0x60, + 0xbf, 0x5c, 0xba, 0x89, 0x97, 0xf8, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xdc, 0x8a, 0xa2, 0xe6, + 0x35, 0xe2, 0xd4, 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f, + 0x4c, 0x31, 0x2c, 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc8, 0x70, 0x63, 0xaf, 0x41, + 0x89, 0x7e, 0xee, 0x4c, 0xc3, 0x75, 0xda, 0x9b, 0xc6, 0x9f, 0x81, 0x92, 0x34, 0x7c, 0x87, 0x22, + 0x8a, 0x03, 0x3b, 0x91, 0xa4, 0x5d, 0x3c, 0xc4, 0x31, 0xde, 0x7e, 0x1c, 0x84, 0x6f, 0x69, 0x3b, + 0x96, 0xf6, 0x06, 0x9c, 0x60, 0x4e, 0xb2, 0x4e, 0xb4, 0x65, 0xcc, 0xd1, 0xce, 0x93, 0xe1, 0x59, + 0x71, 0xaf, 0xe3, 0x5f, 0x36, 0xa9, 0x3d, 0x4e, 0x1e, 0x92, 0x1c, 0xe3, 0x3b, 0x9e, 0xfd, 0x67, + 0x3d, 0xf0, 0xc8, 0x52, 0x35, 0x3f, 0xfc, 0xd0, 0xcb, 0x30, 0xc4, 0xc5, 0x45, 0x3a, 0x35, 0x9c, + 0x86, 0xa8, 0x57, 0x69, 0x40, 0xd7, 0x34, 0x1c, 0x36, 0x28, 0xd1, 0x39, 0x28, 0xba, 0x6f, 0x7b, + 0xc9, 0xa7, 0x7b, 0x4b, 0x6f, 0xac, 0x62, 0x0a, 0xa7, 0x68, 0x2a, 0x79, 0xf2, 0x2d, 0x5d, 0xa1, + 0x95, 0xf4, 
0xf9, 0x1a, 0x8c, 0xb8, 0x61, 0x2d, 0x74, 0x97, 0x3c, 0xba, 0x4e, 0xb5, 0x95, 0xae, + 0x74, 0x0e, 0xb4, 0xd1, 0x0a, 0x8b, 0x13, 0xd4, 0xda, 0xf9, 0xd2, 0xdb, 0xb5, 0xf4, 0xda, 0x31, + 0xf8, 0x01, 0xdd, 0xfe, 0x9b, 0xec, 0xeb, 0x42, 0xa6, 0x82, 0x17, 0xdb, 0x3f, 0xff, 0xe0, 0x10, + 0x4b, 0x1c, 0xbd, 0xd0, 0xd5, 0xb6, 0x9c, 0xe6, 0x4c, 0x2b, 0xda, 0x9a, 0x77, 0xc3, 0x9a, 0xbf, + 0x4b, 0x82, 0x3d, 0x76, 0x17, 0x1f, 0x88, 0x2f, 0x74, 0x0a, 0x31, 0x77, 0x6d, 0xa6, 0x42, 0x29, + 0x71, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x09, 0xac, 0x92, 0x90, 0x1d, 0x01, 0x83, 0x8c, 0x8d, 0x7a, + 0x4c, 0x27, 0xc0, 0x8a, 0x49, 0x92, 0xde, 0x14, 0x70, 0xe1, 0x28, 0x04, 0xdc, 0x0f, 0xc2, 0xb0, + 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xb7, 0x1f, 0xf1, 0x6b, 0x37, 0x53, 0x30, 0x2f, 0xe9, 0x08, + 0x6c, 0xd2, 0xd9, 0xff, 0xb6, 0x07, 0xc6, 0xd9, 0xb0, 0xbd, 0x37, 0xc3, 0xbe, 0x9d, 0x66, 0xd8, + 0xcd, 0xf4, 0x0c, 0x3b, 0x0a, 0xc9, 0xfd, 0x81, 0xa7, 0xd9, 0x67, 0xa0, 0xa4, 0xde, 0x0f, 0xca, + 0x07, 0xc4, 0x56, 0xce, 0x03, 0xe2, 0xce, 0xa7, 0xb7, 0x74, 0x49, 0x2b, 0x66, 0xba, 0xa4, 0x7d, + 0xd9, 0x82, 0xd8, 0xb0, 0x80, 0xde, 0x80, 0x52, 0xd3, 0x67, 0x1e, 0xae, 0x81, 0x74, 0x1b, 0x7f, + 0xbc, 0xad, 0x65, 0x82, 0x47, 0x60, 0x0b, 0x78, 0x2f, 0x54, 0x64, 0x51, 0x1c, 0x73, 0x41, 0xd7, + 0xa1, 0xbf, 0x19, 0x90, 0x6a, 0xc4, 0xc2, 0x03, 0x75, 0xcf, 0x90, 0xcf, 0x1a, 0x5e, 0x10, 0x4b, + 0x0e, 0xf6, 0xbf, 0xb7, 0x60, 0x2c, 0x49, 0x8a, 0x3e, 0x0c, 0x3d, 0xe4, 0x2e, 0xa9, 0x89, 0xf6, + 0x66, 0x1e, 0xc5, 0xb1, 0x6a, 0x82, 0x77, 0x00, 0xfd, 0x8f, 0x59, 0x29, 0x74, 0x0d, 0xfa, 0xe9, + 0x39, 0x7c, 0x55, 0x85, 0xc2, 0x7b, 0x34, 0xef, 0x2c, 0x57, 0x02, 0x0d, 0x6f, 0x9c, 0x00, 0x61, + 0x59, 0x9c, 0xf9, 0x81, 0xd5, 0x9a, 0x55, 0x7a, 0xc5, 0x89, 0xda, 0xdd, 0xc4, 0xd7, 0xe6, 0x2a, + 0x9c, 0x48, 0x70, 0xe3, 0x7e, 0x60, 0x12, 0x88, 0x63, 0x26, 0xf6, 0x2f, 0x58, 0x00, 0xdc, 0xed, + 0xcd, 0xf1, 0x36, 0xc9, 0x31, 0x68, 0xd3, 0xe7, 0xa1, 0x27, 0x6c, 0x92, 0x5a, 0x3b, 0xe7, 0xeb, + 0xb8, 0x3d, 0xd5, 0x26, 0xa9, 0xc5, 0x33, 0x8e, 0xfe, 0xc3, 0xac, 0xb4, 0xfd, 0xbd, 0x00, 0x23, + 0x31, 0xd9, 0x52, 0x44, 0x76, 0xd0, 0x73, 0x46, 0xd0, 0x91, 0x33, 0x89, 0xa0, 0x23, 0x25, 0x46, + 0xad, 0x29, 0x6e, 0x3f, 0x03, 0xc5, 0x1d, 0xe7, 0xae, 0xd0, 0xcc, 0x3d, 0xd3, 0xbe, 0x19, 0x94, + 0xff, 0xf4, 0x8a, 0x73, 0x97, 0x5f, 0x5e, 0x9f, 0x91, 0x2b, 0x64, 0xc5, 0xb9, 0xdb, 0xd1, 0x41, + 0x98, 0x56, 0xc2, 0xea, 0x72, 0x3d, 0xe1, 0xd1, 0xd5, 0x55, 0x5d, 0xae, 0x97, 0xac, 0xcb, 0xf5, + 0xba, 0xa8, 0xcb, 0xf5, 0xd0, 0x3d, 0xe8, 0x17, 0x0e, 0x97, 0x22, 0x2c, 0xd9, 0xe5, 0x2e, 0xea, + 0x13, 0xfe, 0x9a, 0xbc, 0xce, 0xcb, 0xf2, 0x72, 0x2e, 0xa0, 0x1d, 0xeb, 0x95, 0x15, 0xa2, 0xff, + 0xc7, 0x82, 0x11, 0xf1, 0x1b, 0x93, 0xb7, 0x5b, 0x24, 0x8c, 0x84, 0xf0, 0xfa, 0x81, 0xee, 0xdb, + 0x20, 0x0a, 0xf2, 0xa6, 0x7c, 0x40, 0x9e, 0x33, 0x26, 0xb2, 0x63, 0x8b, 0x12, 0xad, 0x40, 0x7f, + 0xdb, 0x82, 0x13, 0x3b, 0xce, 0x5d, 0x5e, 0x23, 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xb8, 0xf0, + 0xe1, 0xee, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0x5a, 0x29, 0x4f, 0x64, 0x91, 0x74, 0x6c, 0x6a, + 0x66, 0xbb, 0xa6, 0x36, 0x60, 0x40, 0xce, 0xb7, 0x87, 0xe9, 0xdd, 0xcd, 0xea, 0x11, 0x73, 0xed, + 0xa1, 0xd6, 0xf3, 0x19, 0x18, 0xd2, 0xe7, 0xd8, 0x43, 0xad, 0xeb, 0x6d, 0x98, 0xc8, 0x98, 0x4b, + 0x0f, 0xb5, 0xca, 0x3b, 0x70, 0x26, 0x77, 0x7e, 0x3c, 0x54, 0xef, 0xfc, 0xaf, 0x5a, 0xfa, 0x3e, + 0x78, 0x0c, 0x26, 0x8d, 0x39, 0xd3, 0xa4, 0x71, 0xbe, 0xfd, 0xca, 0xc9, 0xb1, 0x6b, 0xbc, 0xa5, + 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x75, 0xe8, 0x6b, 0x50, 0x88, 0x74, 0xdb, 0xb5, 0x3b, 0xaf, 0xc8, + 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 
0x0e, 0xf6, 0x2f, 0x5b, 0xd0, 0x73, 0x0c, 0x3d, 0x81, 0xcd, + 0x9e, 0x78, 0x2e, 0x97, 0xb5, 0x88, 0xd0, 0x3e, 0x8d, 0x9d, 0x3b, 0x0b, 0x77, 0x23, 0xe2, 0x85, + 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb2, 0x60, 0x62, 0xd9, 0x77, 0xea, 0xb3, 0x4e, 0xc3, 0xf1, + 0x6a, 0x24, 0x58, 0xf2, 0x36, 0x0f, 0xe5, 0x73, 0x5e, 0xe8, 0xe8, 0x73, 0x3e, 0x27, 0x5d, 0xb6, + 0x7a, 0xf2, 0xc7, 0x8f, 0x4a, 0xd2, 0xc9, 0x30, 0x4c, 0x86, 0x73, 0xf1, 0x16, 0x20, 0xbd, 0x95, + 0xe2, 0xe5, 0x15, 0x86, 0x7e, 0x97, 0xb7, 0x57, 0x0c, 0xe2, 0x93, 0xd9, 0x12, 0x6e, 0xea, 0xf3, + 0xb4, 0x37, 0x45, 0x1c, 0x80, 0x25, 0x23, 0xfb, 0x65, 0xc8, 0x0c, 0x9b, 0xd1, 0x59, 0x7b, 0x61, + 0x7f, 0x1c, 0xc6, 0x59, 0xc9, 0x43, 0x6a, 0x06, 0xec, 0x84, 0xce, 0x35, 0x23, 0x04, 0xa8, 0xfd, + 0x05, 0x0b, 0x46, 0x57, 0x13, 0x91, 0x11, 0x2f, 0x32, 0x2b, 0x6d, 0x86, 0xaa, 0xbf, 0xca, 0xa0, + 0x58, 0x60, 0x8f, 0x5c, 0x15, 0xf6, 0xd7, 0x16, 0xc4, 0x91, 0x6c, 0x8e, 0x41, 0x7c, 0x9b, 0x33, + 0xc4, 0xb7, 0x4c, 0x41, 0x56, 0x35, 0x27, 0x4f, 0x7a, 0x43, 0xd7, 0x55, 0x8c, 0xb7, 0x36, 0x32, + 0x6c, 0xcc, 0x86, 0x4f, 0xc5, 0x11, 0x33, 0x10, 0x9c, 0x8c, 0xfa, 0x66, 0xff, 0x7e, 0x01, 0x90, + 0xa2, 0xed, 0x3a, 0x06, 0x5d, 0xba, 0xc4, 0xd1, 0xc4, 0xa0, 0xdb, 0x05, 0xc4, 0xfc, 0x0c, 0x02, + 0xc7, 0x0b, 0x39, 0x5b, 0x57, 0x28, 0xff, 0x0e, 0xe7, 0xc4, 0x30, 0x25, 0x1f, 0xa5, 0x2d, 0xa7, + 0xb8, 0xe1, 0x8c, 0x1a, 0x34, 0xff, 0x91, 0xde, 0x6e, 0xfd, 0x47, 0xfa, 0x3a, 0xbc, 0xae, 0xfc, + 0x8a, 0x05, 0xc3, 0xaa, 0x9b, 0xde, 0x25, 0x3e, 0xf8, 0xaa, 0x3d, 0x39, 0x1b, 0x68, 0x45, 0x6b, + 0x32, 0x3b, 0x58, 0xbe, 0x83, 0xbd, 0x92, 0x75, 0x1a, 0xee, 0x3d, 0xa2, 0x62, 0x96, 0x96, 0xc5, + 0xab, 0x57, 0x01, 0x3d, 0xd8, 0x2f, 0x0f, 0xab, 0x7f, 0x3c, 0x26, 0x7b, 0x5c, 0x84, 0x6e, 0xc9, + 0xa3, 0x89, 0xa9, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, 0x92, 0xc4, 0x5b, 0xa5, 0xde, 0x0a, + 0x05, 0x1e, 0xec, 0x97, 0x47, 0x54, 0x01, 0x06, 0xc1, 0x9c, 0xba, 0xfb, 0xc8, 0x7e, 0xe9, 0xc9, + 0xd9, 0x31, 0xb2, 0xdf, 0x5f, 0x5a, 0xd0, 0xb3, 0xea, 0xd7, 0x8f, 0x63, 0x0b, 0x78, 0xcd, 0xd8, + 0x02, 0xce, 0xe6, 0xa5, 0xcb, 0xc8, 0x5d, 0xfd, 0x8b, 0x89, 0xd5, 0x7f, 0x3e, 0x97, 0x43, 0xfb, + 0x85, 0xbf, 0x03, 0x83, 0x2c, 0x09, 0x87, 0x78, 0x97, 0xf5, 0x82, 0xb1, 0xe0, 0xcb, 0x89, 0x05, + 0x3f, 0xaa, 0x91, 0x6a, 0x2b, 0xfd, 0x29, 0xe8, 0x17, 0x0f, 0x7d, 0x92, 0x8f, 0x8d, 0x05, 0x2d, + 0x96, 0x78, 0xfb, 0xc7, 0x8b, 0x60, 0x24, 0xfd, 0x40, 0xbf, 0x6a, 0xc1, 0x74, 0xc0, 0x1d, 0x80, + 0xeb, 0xf3, 0xad, 0xc0, 0xf5, 0x36, 0xab, 0xb5, 0x2d, 0x52, 0x6f, 0x35, 0x5c, 0x6f, 0x73, 0x69, + 0xd3, 0xf3, 0x15, 0x78, 0xe1, 0x2e, 0xa9, 0xb5, 0x98, 0x71, 0xae, 0x43, 0x86, 0x11, 0xe5, 0x48, + 0xff, 0xfc, 0xfd, 0xfd, 0xf2, 0x34, 0x3e, 0x14, 0x6f, 0x7c, 0xc8, 0xb6, 0xa0, 0xdf, 0xb1, 0xe0, + 0x32, 0xcf, 0x85, 0xd1, 0x7d, 0xfb, 0xdb, 0xdc, 0x96, 0x2b, 0x92, 0x55, 0xcc, 0x64, 0x8d, 0x04, + 0x3b, 0xb3, 0x1f, 0x14, 0x1d, 0x7a, 0xb9, 0x72, 0xb8, 0xba, 0xf0, 0x61, 0x1b, 0x67, 0xff, 0xc3, + 0x22, 0x0c, 0x8b, 0x08, 0x70, 0xe2, 0x0c, 0x78, 0xc9, 0x98, 0x12, 0x8f, 0x26, 0xa6, 0xc4, 0xb8, + 0x41, 0x7c, 0x34, 0xdb, 0x7f, 0x08, 0xe3, 0x74, 0x73, 0xbe, 0x46, 0x9c, 0x20, 0x5a, 0x27, 0x0e, + 0x77, 0x0b, 0x2b, 0x1e, 0x7a, 0xf7, 0x57, 0xfa, 0xc9, 0xe5, 0x24, 0x33, 0x9c, 0xe6, 0xff, 0xed, + 0x74, 0xe6, 0x78, 0x30, 0x96, 0x0a, 0xe2, 0xf7, 0x26, 0x94, 0xd4, 0x2b, 0x15, 0xb1, 0xe9, 0xb4, + 0x8f, 0x85, 0x99, 0xe4, 0xc0, 0xd5, 0x5f, 0xf1, 0x0b, 0xa9, 0x98, 0x9d, 0xfd, 0x77, 0x0b, 0x46, + 0x85, 0x7c, 0x10, 0x57, 0x61, 0xc0, 0x09, 0x43, 0x77, 0xd3, 0x23, 0xf5, 0x76, 0x1a, 0xca, 0x54, + 0x35, 0xec, 0xa5, 0xd0, 0x8c, 0x28, 0x89, 0x15, 0x0f, 0x74, 
0x8d, 0x3b, 0xdf, 0xed, 0x92, 0x76, + 0xea, 0xc9, 0x14, 0x37, 0x90, 0xee, 0x79, 0xbb, 0x04, 0x8b, 0xf2, 0xe8, 0x93, 0xdc, 0x3b, 0xf2, + 0xba, 0xe7, 0xdf, 0xf1, 0xae, 0xfa, 0xbe, 0x8c, 0xf6, 0xd1, 0x1d, 0xc3, 0x71, 0xe9, 0x13, 0xa9, + 0x8a, 0x63, 0x93, 0x5b, 0x77, 0x51, 0x71, 0x3f, 0x0b, 0x2c, 0xf6, 0xbf, 0xf9, 0x28, 0x3c, 0x44, + 0x04, 0x46, 0x45, 0x78, 0x41, 0x09, 0x13, 0x7d, 0x97, 0x79, 0x95, 0x33, 0x4b, 0xc7, 0x8a, 0xf4, + 0xeb, 0x26, 0x0b, 0x9c, 0xe4, 0x69, 0xff, 0x8c, 0x05, 0xec, 0x81, 0xec, 0x31, 0xc8, 0x23, 0x1f, + 0x31, 0xe5, 0x91, 0xc9, 0xbc, 0x4e, 0xce, 0x11, 0x45, 0x5e, 0xe4, 0x33, 0xab, 0x12, 0xf8, 0x77, + 0xf7, 0x84, 0x4b, 0x4b, 0xe7, 0xfb, 0x87, 0xfd, 0xdf, 0x2d, 0xbe, 0x89, 0xc5, 0xe1, 0x04, 0x3e, + 0x07, 0x03, 0x35, 0xa7, 0xe9, 0xd4, 0x78, 0x86, 0xaa, 0x5c, 0x8d, 0x9e, 0x51, 0x68, 0x7a, 0x4e, + 0x94, 0xe0, 0x1a, 0x2a, 0x19, 0xa6, 0x72, 0x40, 0x82, 0x3b, 0x6a, 0xa5, 0x54, 0x95, 0x53, 0xdb, + 0x30, 0x6c, 0x30, 0x7b, 0xa8, 0xea, 0x8c, 0xcf, 0xf1, 0x23, 0x56, 0x85, 0x55, 0xdd, 0x81, 0x71, + 0x4f, 0xfb, 0x4f, 0x0f, 0x14, 0x79, 0xb9, 0x7c, 0xbc, 0xd3, 0x21, 0xca, 0x4e, 0x1f, 0xed, 0xed, + 0x6d, 0x82, 0x0d, 0x4e, 0x73, 0xb6, 0x7f, 0xc2, 0x82, 0xd3, 0x3a, 0xa1, 0xf6, 0xbc, 0xa7, 0x93, + 0x91, 0x64, 0x1e, 0x06, 0xfc, 0x26, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xa9, 0x71, 0x49, 0x76, 0xfa, + 0x0d, 0x01, 0x3f, 0x10, 0xf9, 0x16, 0x24, 0x77, 0x09, 0xc7, 0xaa, 0x24, 0xbd, 0x7d, 0xb2, 0xce, + 0x08, 0xc5, 0x43, 0x2e, 0xb6, 0x07, 0x30, 0x7b, 0x7b, 0x88, 0x05, 0xc6, 0xfe, 0x33, 0x8b, 0x4f, + 0x2c, 0xbd, 0xe9, 0xe8, 0x6d, 0x18, 0xdb, 0x71, 0xa2, 0xda, 0xd6, 0xc2, 0xdd, 0x66, 0xc0, 0x4d, + 0x4e, 0xb2, 0x9f, 0x9e, 0xe9, 0xd4, 0x4f, 0xda, 0x47, 0xc6, 0x0e, 0x9f, 0x2b, 0x09, 0x66, 0x38, + 0xc5, 0x1e, 0xad, 0xc3, 0x20, 0x83, 0xb1, 0x37, 0x8a, 0x61, 0x3b, 0xd1, 0x20, 0xaf, 0x36, 0xe5, + 0xb2, 0xb0, 0x12, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, + 0xf4, 0x37, 0xfd, 0xfa, 0xdc, 0xd2, 0x3c, 0x16, 0xa3, 0xa0, 0x8e, 0x91, 0x0a, 0x07, 0x63, 0x89, + 0x47, 0x97, 0x60, 0x40, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, + 0x1e, 0xa0, 0x19, 0xf8, 0xbb, 0x6e, 0x9d, 0xc5, 0x2c, 0x29, 0x9a, 0xde, 0x46, 0x15, 0x85, 0xc1, + 0x1a, 0x15, 0x7a, 0x15, 0x86, 0x5b, 0x5e, 0xc8, 0xc5, 0x11, 0x2d, 0x32, 0xb4, 0xf2, 0x83, 0xb9, + 0xa9, 0x23, 0xb1, 0x49, 0x8b, 0x66, 0xa0, 0x2f, 0x72, 0x98, 0xf7, 0x4c, 0x6f, 0xbe, 0x53, 0xf0, + 0x1a, 0xa5, 0xd0, 0x93, 0x21, 0xd1, 0x02, 0x58, 0x14, 0x44, 0x6f, 0xca, 0xe7, 0xc2, 0x7c, 0x63, + 0x17, 0xde, 0xf8, 0xdd, 0x1d, 0x02, 0xda, 0x63, 0x61, 0xe1, 0xe5, 0x6f, 0xf0, 0x42, 0xaf, 0x00, + 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0x7c, 0xde, 0x94, 0x5c, 0x30, 0xef, 0xaf, 0xfa, 0xd1, + 0xcd, 0x90, 0x2c, 0x28, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x53, 0x02, 0x88, 0xe5, 0x76, 0x74, 0x2f, + 0xb5, 0x71, 0x3d, 0xdb, 0x5e, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, 0x7d, 0x9f, 0x05, 0x83, 0x22, 0x34, + 0x0b, 0x1b, 0xa1, 0x42, 0xfb, 0x8d, 0xd3, 0x8c, 0x10, 0x43, 0x4b, 0xf0, 0x26, 0xbc, 0x20, 0x67, + 0xa8, 0x86, 0xe9, 0xd8, 0x0a, 0xbd, 0x62, 0xf4, 0x7e, 0x79, 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, + 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x9b, 0xc6, 0x2d, 0xb1, 0x27, 0xff, 0x3d, 0xa4, 0x21, + 0xbe, 0x76, 0xba, 0x20, 0xa2, 0x8a, 0x1e, 0x1b, 0xa1, 0x37, 0xff, 0x11, 0x9f, 0x76, 0x4f, 0xea, + 0x10, 0x17, 0xe1, 0x33, 0x30, 0x5a, 0x37, 0x85, 0x00, 0x31, 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, + 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, 0x43, 0x65, 0x2c, 0x79, 0x1b, 0xbe, + 0x78, 0x11, 0x62, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x1d, 0x4a, 0x19, 0x9f, 0xee, 
0xab, 0xa2, + 0x2c, 0x56, 0x5c, 0xd0, 0xeb, 0xd0, 0xc7, 0x5e, 0x71, 0x85, 0x93, 0x03, 0xf9, 0x1a, 0x67, 0x33, + 0x66, 0x60, 0xbc, 0x20, 0xd9, 0xdf, 0x10, 0x0b, 0x0e, 0xe8, 0x9a, 0x7c, 0x23, 0x19, 0x2e, 0x79, + 0x37, 0x43, 0xc2, 0xde, 0x48, 0x96, 0x66, 0x1f, 0x8f, 0x9f, 0x3f, 0x72, 0x78, 0x66, 0xca, 0x44, + 0xa3, 0x24, 0x95, 0xa2, 0xc4, 0x7f, 0x99, 0x89, 0x51, 0x44, 0x38, 0xca, 0x6c, 0x9e, 0x99, 0xad, + 0x31, 0xee, 0xce, 0x5b, 0x26, 0x0b, 0x9c, 0xe4, 0x49, 0x25, 0x52, 0xbe, 0xea, 0xc5, 0x9b, 0x92, + 0x4e, 0x7b, 0x07, 0xbf, 0x88, 0xb3, 0xd3, 0x88, 0x43, 0xb0, 0x28, 0x7f, 0xac, 0xe2, 0xc1, 0x94, + 0x07, 0x63, 0xc9, 0x25, 0xfa, 0x50, 0xc5, 0x91, 0x3f, 0xe9, 0x81, 0x11, 0x73, 0x4a, 0xa1, 0xcb, + 0x50, 0x12, 0x4c, 0x54, 0x36, 0x13, 0xb5, 0x4a, 0x56, 0x24, 0x02, 0xc7, 0x34, 0x2c, 0x89, 0x0d, + 0x2b, 0xae, 0x39, 0x11, 0xc7, 0x49, 0x6c, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x62, 0xb5, 0xee, 0xfb, + 0x91, 0x3a, 0x90, 0xd4, 0xbc, 0x9b, 0x65, 0x50, 0x2c, 0xb0, 0xf4, 0x20, 0xda, 0x26, 0x81, 0x47, + 0x1a, 0x66, 0x14, 0x71, 0x75, 0x10, 0x5d, 0xd7, 0x91, 0xd8, 0xa4, 0xa5, 0xc7, 0xa9, 0x1f, 0xb2, + 0x89, 0x2c, 0xae, 0x6f, 0xb1, 0x53, 0x76, 0x95, 0x3f, 0x2f, 0x97, 0x78, 0xf4, 0x71, 0x38, 0xad, + 0x22, 0x76, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0xf6, 0x19, 0xda, 0x96, 0xd3, 0x73, 0xd9, 0x64, 0x38, + 0xaf, 0x3c, 0x7a, 0x0d, 0x46, 0x84, 0x88, 0x2f, 0x39, 0xf6, 0x9b, 0x1e, 0x46, 0xd7, 0x0d, 0x2c, + 0x4e, 0x50, 0xcb, 0x38, 0xe8, 0x4c, 0xca, 0x96, 0x1c, 0x06, 0xd2, 0x71, 0xd0, 0x75, 0x3c, 0x4e, + 0x95, 0x40, 0x33, 0x30, 0xca, 0x65, 0x30, 0xd7, 0xdb, 0xe4, 0x63, 0x22, 0x9e, 0x7c, 0xa9, 0x25, + 0x75, 0xc3, 0x44, 0xe3, 0x24, 0x3d, 0x7a, 0x19, 0x86, 0x9c, 0xa0, 0xb6, 0xe5, 0x46, 0xa4, 0x16, + 0xb5, 0x02, 0xfe, 0x16, 0x4c, 0x73, 0xd1, 0x9a, 0xd1, 0x70, 0xd8, 0xa0, 0xb4, 0xef, 0xc1, 0x44, + 0x46, 0xdc, 0x09, 0x3a, 0x71, 0x9c, 0xa6, 0x2b, 0xbf, 0x29, 0xe1, 0x07, 0x3d, 0x53, 0x59, 0x92, + 0x5f, 0xa3, 0x51, 0xd1, 0xd9, 0xc9, 0xe2, 0x53, 0x68, 0x89, 0x57, 0xd5, 0xec, 0x5c, 0x94, 0x08, + 0x1c, 0xd3, 0xd8, 0xff, 0xa9, 0x00, 0xa3, 0x19, 0xb6, 0x15, 0x96, 0xfc, 0x33, 0x71, 0x49, 0x89, + 0x73, 0x7d, 0x9a, 0x61, 0xf5, 0x0b, 0x87, 0x08, 0xab, 0x5f, 0xec, 0x14, 0x56, 0xbf, 0xe7, 0x9d, + 0x84, 0xd5, 0x37, 0x7b, 0xac, 0xb7, 0xab, 0x1e, 0xcb, 0x08, 0xc5, 0xdf, 0x77, 0xc8, 0x50, 0xfc, + 0x46, 0xa7, 0xf7, 0x77, 0xd1, 0xe9, 0x3f, 0x52, 0x80, 0xb1, 0xa4, 0x2b, 0xe9, 0x31, 0xe8, 0x6d, + 0x5f, 0x37, 0xf4, 0xb6, 0x97, 0xba, 0x79, 0xa2, 0x9b, 0xab, 0xc3, 0xc5, 0x09, 0x1d, 0xee, 0xd3, + 0x5d, 0x71, 0x6b, 0xaf, 0xcf, 0xfd, 0xc9, 0x02, 0x9c, 0xcc, 0x7c, 0x23, 0x7c, 0x0c, 0x7d, 0x73, + 0xc3, 0xe8, 0x9b, 0xe7, 0xba, 0x7e, 0xbe, 0x9c, 0xdb, 0x41, 0xb7, 0x13, 0x1d, 0x74, 0xb9, 0x7b, + 0x96, 0xed, 0x7b, 0xe9, 0xeb, 0x45, 0x38, 0x9f, 0x59, 0x2e, 0x56, 0x7b, 0x2e, 0x1a, 0x6a, 0xcf, + 0xe7, 0x13, 0x6a, 0x4f, 0xbb, 0x7d, 0xe9, 0xa3, 0xd1, 0x83, 0x8a, 0x67, 0xbc, 0x2c, 0x18, 0xc1, + 0x03, 0xea, 0x40, 0x8d, 0x67, 0xbc, 0x8a, 0x11, 0x36, 0xf9, 0x7e, 0x3b, 0xe9, 0x3e, 0x7f, 0xdb, + 0x82, 0x33, 0x99, 0x63, 0x73, 0x0c, 0xba, 0xae, 0x55, 0x53, 0xd7, 0xf5, 0x54, 0xd7, 0xb3, 0x35, + 0x47, 0xf9, 0xf5, 0xd3, 0xbd, 0x39, 0xdf, 0xc2, 0x6e, 0xf2, 0x37, 0x60, 0xd0, 0xa9, 0xd5, 0x48, + 0x18, 0xae, 0xf8, 0x75, 0x15, 0x81, 0xfb, 0x39, 0x76, 0xcf, 0x8a, 0xc1, 0x07, 0xfb, 0xe5, 0xa9, + 0x24, 0x8b, 0x18, 0x8d, 0x75, 0x0e, 0xe8, 0x93, 0x30, 0x10, 0x8a, 0x73, 0x53, 0x8c, 0xfd, 0x0b, + 0x5d, 0x76, 0x8e, 0xb3, 0x4e, 0x1a, 0x66, 0xa8, 0x27, 0xa5, 0xa9, 0x50, 0x2c, 0xcd, 0xb0, 0x30, + 0x85, 0x23, 0x0d, 0x0b, 0xf3, 0x3c, 0xc0, 0xae, 0xba, 0x0c, 0x24, 0xf5, 0x0f, 0xda, 0x35, 0x41, + 0xa3, 0x42, 
0x1f, 0x85, 0xb1, 0x90, 0xc7, 0x42, 0x9c, 0x6b, 0x38, 0x21, 0x7b, 0x6d, 0x23, 0x66, + 0x21, 0x0b, 0x27, 0x55, 0x4d, 0xe0, 0x70, 0x8a, 0x1a, 0x2d, 0xca, 0x5a, 0x59, 0xe0, 0x46, 0x3e, + 0x31, 0x2f, 0xc6, 0x35, 0x8a, 0xd4, 0xe3, 0x27, 0x92, 0xdd, 0xcf, 0x3a, 0x5e, 0x2b, 0x89, 0x3e, + 0x09, 0x40, 0xa7, 0x8f, 0xd0, 0x43, 0xf4, 0xe7, 0x6f, 0x9e, 0x74, 0x57, 0xa9, 0x67, 0x3a, 0x37, + 0xb3, 0x97, 0xb7, 0xf3, 0x8a, 0x09, 0xd6, 0x18, 0x22, 0x07, 0x86, 0xe3, 0x7f, 0x71, 0x66, 0xde, + 0x4b, 0xb9, 0x35, 0x24, 0x99, 0x33, 0x95, 0xf7, 0xbc, 0xce, 0x02, 0x9b, 0x1c, 0xed, 0x7f, 0x37, + 0x00, 0x8f, 0xb4, 0xd9, 0x86, 0xd1, 0x8c, 0x69, 0xea, 0x7d, 0x26, 0x79, 0x7f, 0x9f, 0xca, 0x2c, + 0x6c, 0x5c, 0xe8, 0x13, 0xb3, 0xbd, 0xf0, 0x8e, 0x67, 0xfb, 0x0f, 0x59, 0x9a, 0x66, 0x85, 0x3b, + 0x95, 0x7e, 0xe4, 0x90, 0xc7, 0xcb, 0x11, 0xaa, 0x5a, 0x36, 0x32, 0xf4, 0x15, 0xcf, 0x77, 0xdd, + 0x9c, 0xee, 0x15, 0x18, 0x5f, 0xcd, 0x0e, 0x00, 0xcc, 0x55, 0x19, 0x57, 0x0f, 0xfb, 0xfd, 0xc7, + 0x15, 0x0c, 0xf8, 0xf7, 0x2d, 0x38, 0x93, 0x02, 0xf3, 0x36, 0x90, 0x50, 0xc4, 0xa8, 0x5a, 0x7d, + 0xc7, 0x8d, 0x97, 0x0c, 0xf9, 0x37, 0x5c, 0x13, 0xdf, 0x70, 0x26, 0x97, 0x2e, 0xd9, 0xf4, 0x1f, + 0xfc, 0xa3, 0xf2, 0x04, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x3f, 0xde, 0x8b, 0xff, 0x37, 0x27, + 0xf6, 0xf1, 0xd4, 0x32, 0x9c, 0x6f, 0xdf, 0xd5, 0x87, 0x7a, 0x9e, 0xfc, 0x7b, 0x16, 0x9c, 0x6b, + 0x1b, 0x03, 0xe7, 0x5b, 0x50, 0xce, 0xb5, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x25, 0x0c, 0xef, 0xb8, + 0xcb, 0x50, 0xaa, 0x25, 0xf2, 0xa1, 0xc6, 0xd1, 0x20, 0x54, 0x2e, 0xd4, 0x98, 0xc6, 0x70, 0x82, + 0x2b, 0x74, 0x74, 0x82, 0xfb, 0x0d, 0x0b, 0x52, 0x67, 0xd5, 0x31, 0x08, 0x4d, 0x4b, 0xa6, 0xd0, + 0xf4, 0x78, 0x37, 0xbd, 0x99, 0x23, 0x2f, 0xfd, 0xc5, 0x28, 0x9c, 0xca, 0x79, 0x5d, 0xb8, 0x0b, + 0xe3, 0x9b, 0x35, 0x62, 0x3e, 0x27, 0x6f, 0x17, 0x66, 0xa9, 0xed, 0xdb, 0x73, 0x9e, 0x86, 0x36, + 0x45, 0x82, 0xd3, 0x55, 0xa0, 0xcf, 0x5b, 0x70, 0xc2, 0xb9, 0x13, 0x2e, 0x50, 0xe1, 0xd7, 0xad, + 0xcd, 0x36, 0xfc, 0xda, 0x36, 0x95, 0x2c, 0xe4, 0xb2, 0x7a, 0x31, 0x53, 0x21, 0x79, 0xbb, 0x9a, + 0xa2, 0x37, 0xaa, 0x67, 0x49, 0xc7, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0x08, 0x8b, 0xfc, 0x28, 0xf4, + 0x6a, 0xdd, 0x26, 0xe0, 0x41, 0xd6, 0x33, 0x50, 0x2e, 0xcd, 0x49, 0x0c, 0x56, 0x7c, 0xd0, 0xa7, + 0xa1, 0xb4, 0x29, 0xdf, 0x36, 0x67, 0x48, 0x8b, 0x71, 0x47, 0xb6, 0x7f, 0xf1, 0xcd, 0xbd, 0x0a, + 0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x84, 0xed, 0xf2, 0x76, 0x27, 0xdc, 0x47, + 0x79, 0x58, 0x91, 0xd5, 0xc5, 0x2a, 0xa6, 0x05, 0xd1, 0x35, 0x28, 0x06, 0xeb, 0x75, 0xa1, 0x4d, + 0xcf, 0x5c, 0xa4, 0x78, 0x76, 0x3e, 0xa7, 0x55, 0x8c, 0x13, 0x9e, 0x9d, 0xc7, 0x94, 0x05, 0xaa, + 0x40, 0x2f, 0x7b, 0x92, 0x27, 0x64, 0xb3, 0xcc, 0x5b, 0x68, 0x9b, 0xa7, 0xad, 0x3c, 0xf6, 0x08, + 0x23, 0xc0, 0x9c, 0x11, 0x5a, 0x83, 0xbe, 0x1a, 0xcb, 0xf1, 0x2c, 0x84, 0xb1, 0xf7, 0x67, 0xea, + 0xcd, 0xdb, 0x24, 0xbf, 0x16, 0x6a, 0x64, 0x46, 0x81, 0x05, 0x2f, 0xc6, 0x95, 0x34, 0xb7, 0x36, + 0x42, 0xa6, 0x77, 0xcb, 0xe3, 0xda, 0x26, 0xa7, 0xbb, 0xe0, 0xca, 0x28, 0xb0, 0xe0, 0x85, 0x5e, + 0x81, 0xc2, 0x46, 0x4d, 0x3c, 0xb7, 0xcb, 0x54, 0xa0, 0x9b, 0x91, 0x61, 0x66, 0xfb, 0xee, 0xef, + 0x97, 0x0b, 0x8b, 0x73, 0xb8, 0xb0, 0x51, 0x43, 0xab, 0xd0, 0xbf, 0xc1, 0x63, 0x49, 0x08, 0x1d, + 0xf9, 0x93, 0xd9, 0x61, 0x2e, 0x52, 0xe1, 0x26, 0xf8, 0xd3, 0x2d, 0x81, 0xc0, 0x92, 0x09, 0x4b, + 0xd7, 0xa1, 0x62, 0x62, 0x88, 0x90, 0x7c, 0xd3, 0x87, 0x8b, 0x63, 0xc2, 0x65, 0xe5, 0x38, 0xb2, + 0x06, 0xd6, 0x38, 0xd2, 0x59, 0xed, 0xdc, 0x6b, 0x05, 0x2c, 0x5e, 0xbb, 0x88, 0xdd, 0x94, 0x39, + 0xab, 0x67, 0x24, 0x51, 0xbb, 0x59, 
0xad, 0x88, 0x70, 0xcc, 0x14, 0x6d, 0xc3, 0xf0, 0x6e, 0xd8, + 0xdc, 0x22, 0x72, 0x49, 0xb3, 0x50, 0x4e, 0x39, 0xb2, 0xde, 0x2d, 0x41, 0xe8, 0x06, 0x51, 0xcb, + 0x69, 0xa4, 0x76, 0x21, 0x26, 0x97, 0xdf, 0xd2, 0x99, 0x61, 0x93, 0x37, 0xed, 0xfe, 0xb7, 0x5b, + 0xfe, 0xfa, 0x5e, 0x44, 0x44, 0x24, 0xbd, 0xcc, 0xee, 0x7f, 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, + 0xb0, 0x64, 0x82, 0x6e, 0x89, 0xee, 0x61, 0xbb, 0xe7, 0x58, 0x7e, 0x98, 0xde, 0x19, 0x49, 0x94, + 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb9, 0xe5, 0x47, 0xbe, 0x97, 0xd8, 0xa1, + 0xc7, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, + 0x8c, 0x34, 0xfd, 0x20, 0xba, 0xe3, 0x07, 0x72, 0x7e, 0xa1, 0x36, 0x3a, 0x3e, 0x83, 0x52, 0xd4, + 0xc8, 0x82, 0x54, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x06, 0xfd, 0x61, 0xcd, 0x69, 0x90, 0xa5, + 0x1b, 0x93, 0x13, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, 0x2e, 0x1e, 0x0a, 0x84, 0x93, 0x60, + 0xc9, 0x0e, 0x2d, 0x42, 0x2f, 0x4b, 0x83, 0xc9, 0xc2, 0x3e, 0xe6, 0x44, 0x1b, 0x4e, 0x39, 0xf3, + 0xf3, 0xbd, 0x89, 0x81, 0x31, 0x2f, 0x4e, 0xd7, 0x80, 0xb8, 0xea, 0xfa, 0xe1, 0xe4, 0xc9, 0xfc, + 0x35, 0x20, 0x6e, 0xc8, 0x37, 0xaa, 0xed, 0xd6, 0x80, 0x22, 0xc2, 0x31, 0x53, 0xba, 0x33, 0xd3, + 0xdd, 0xf4, 0x54, 0x1b, 0x2f, 0xb4, 0xdc, 0xbd, 0x94, 0xed, 0xcc, 0x74, 0x27, 0xa5, 0x2c, 0xec, + 0x3f, 0xee, 0x4f, 0xcb, 0x2c, 0x4c, 0x39, 0xf2, 0xbf, 0x5b, 0x29, 0xbb, 0xf9, 0x07, 0xba, 0xd5, + 0xd5, 0x1e, 0xe1, 0xb5, 0xee, 0xf3, 0x16, 0x9c, 0x6a, 0x66, 0x7e, 0x88, 0x10, 0x00, 0xba, 0x53, + 0xf9, 0xf2, 0x4f, 0x57, 0x21, 0x42, 0xb3, 0xf1, 0x38, 0xa7, 0xa6, 0xe4, 0xd5, 0xb9, 0xf8, 0x8e, + 0xaf, 0xce, 0x2b, 0x30, 0x50, 0xe3, 0xf7, 0x1c, 0x19, 0xda, 0xba, 0xab, 0x00, 0x77, 0x4c, 0x94, + 0x10, 0x17, 0xa4, 0x0d, 0xac, 0x58, 0xa0, 0x1f, 0xb6, 0xe0, 0x5c, 0xb2, 0xe9, 0x98, 0x30, 0xb4, + 0x88, 0x2b, 0xca, 0xf5, 0x32, 0x8b, 0xe2, 0xfb, 0x53, 0xf2, 0xbf, 0x41, 0x7c, 0xd0, 0x89, 0x00, + 0xb7, 0xaf, 0x0c, 0xcd, 0x67, 0x28, 0x86, 0xfa, 0x4c, 0x63, 0x58, 0x17, 0xca, 0xa1, 0x17, 0x61, + 0x68, 0xc7, 0x6f, 0x79, 0x91, 0x70, 0x5a, 0x13, 0x0e, 0x34, 0xcc, 0x71, 0x64, 0x45, 0x83, 0x63, + 0x83, 0x2a, 0xa1, 0x52, 0x1a, 0x78, 0x60, 0x95, 0xd2, 0x5b, 0x30, 0xe4, 0x69, 0x5e, 0xd6, 0x42, + 0x1e, 0xb8, 0x98, 0x1f, 0x13, 0x58, 0xf7, 0xc9, 0xe6, 0xad, 0xd4, 0x21, 0xd8, 0xe0, 0x76, 0xbc, + 0xde, 0x6c, 0x3f, 0x5f, 0xc8, 0x10, 0xea, 0xb9, 0x5a, 0xe9, 0xc3, 0xa6, 0x5a, 0xe9, 0x62, 0x52, + 0xad, 0x94, 0x32, 0x84, 0x18, 0x1a, 0xa5, 0xee, 0x53, 0x64, 0x75, 0x1d, 0x57, 0xf4, 0xbb, 0x2d, + 0x38, 0xcd, 0x34, 0xeb, 0xb4, 0x82, 0x77, 0xac, 0x4d, 0x7f, 0xe4, 0xfe, 0x7e, 0xf9, 0xf4, 0x72, + 0x36, 0x3b, 0x9c, 0x57, 0x8f, 0xdd, 0x80, 0x0b, 0x9d, 0x8e, 0x46, 0xe6, 0x41, 0x59, 0x57, 0xa6, + 0xf7, 0xd8, 0x83, 0xb2, 0xbe, 0x34, 0x8f, 0x19, 0xa6, 0xdb, 0xa8, 0x59, 0xf6, 0x7f, 0xb0, 0xa0, + 0x58, 0xf1, 0xeb, 0xc7, 0x70, 0xe9, 0xfe, 0x88, 0x71, 0xe9, 0x7e, 0x24, 0xfb, 0x50, 0xae, 0xe7, + 0x9a, 0x92, 0x16, 0x12, 0xa6, 0xa4, 0x73, 0x79, 0x0c, 0xda, 0x1b, 0x8e, 0x7e, 0xaa, 0x08, 0x83, + 0x15, 0xbf, 0xae, 0x9e, 0x2f, 0xfc, 0xe3, 0x07, 0x79, 0xbe, 0x90, 0x9b, 0xf4, 0x44, 0xe3, 0xcc, + 0x1c, 0x2f, 0xe5, 0xcb, 0xed, 0x6f, 0xb1, 0x57, 0x0c, 0xb7, 0x89, 0xbb, 0xb9, 0x15, 0x91, 0x7a, + 0xf2, 0x73, 0x8e, 0xef, 0x15, 0xc3, 0x1f, 0x17, 0x60, 0x34, 0x51, 0x3b, 0x6a, 0xc0, 0x70, 0x43, + 0x37, 0x54, 0x88, 0x79, 0xfa, 0x40, 0x36, 0x0e, 0xe1, 0x05, 0xae, 0x81, 0xb0, 0xc9, 0x1c, 0x4d, + 0x03, 0x28, 0xcb, 0xbd, 0x54, 0x57, 0xb3, 0x9b, 0x87, 0x32, 0xed, 0x87, 0x58, 0xa3, 0x40, 0x2f, + 0xc1, 0x60, 0xe4, 0x37, 0xfd, 0x86, 0xbf, 0xb9, 0x77, 0x9d, 
0xc8, 0x80, 0x6a, 0xca, 0xb7, 0x73, + 0x2d, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x71, 0xc5, 0xa4, 0x7a, 0x04, 0xc6, 0x1b, 0xa6, 0xd9, + 0x58, 0x4d, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x2d, 0xf2, 0x2e, 0xf6, 0x22, 0xf7, 0xbd, 0xd5, + 0xf0, 0xee, 0x5e, 0x0d, 0x5f, 0xb7, 0x60, 0x8c, 0xd6, 0xce, 0x1c, 0xd7, 0xa4, 0xa8, 0xa1, 0x22, + 0xa1, 0x5b, 0x6d, 0x22, 0xa1, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x45, 0x42, 0x7f, 0xa8, 0x6d, + 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x6d, 0x75, 0x3a, 0x12, 0x04, 0x58, + 0x60, 0x65, 0xa0, 0xf4, 0x9e, 0xec, 0x40, 0xe9, 0x3c, 0xde, 0xad, 0x70, 0x71, 0x12, 0x42, 0x9f, + 0x16, 0xef, 0x56, 0xfa, 0x3e, 0xc5, 0x34, 0xf6, 0x57, 0x8b, 0x30, 0x54, 0xf1, 0xeb, 0xb1, 0xd5, + 0xfe, 0x45, 0xc3, 0x6a, 0x7f, 0x21, 0x61, 0xb5, 0x1f, 0xd3, 0x69, 0xdf, 0xb3, 0xd1, 0x7f, 0xb3, + 0x6c, 0xf4, 0xbf, 0x6e, 0xb1, 0x51, 0x9b, 0x5f, 0xad, 0x72, 0x3f, 0x48, 0x74, 0x05, 0x06, 0xd9, + 0x06, 0xc3, 0x5e, 0x77, 0x4b, 0x53, 0x36, 0x4b, 0x5c, 0xb6, 0x1a, 0x83, 0xb1, 0x4e, 0x83, 0x2e, + 0xc1, 0x40, 0x48, 0x9c, 0xa0, 0xb6, 0xa5, 0x76, 0x57, 0x61, 0x77, 0xe6, 0x30, 0xac, 0xb0, 0xe8, + 0x8d, 0x38, 0xd4, 0x6a, 0x31, 0xff, 0xb5, 0xa8, 0xde, 0x1e, 0xbe, 0x44, 0xf2, 0xe3, 0xab, 0xda, + 0xb7, 0x01, 0xa5, 0xe9, 0xbb, 0x08, 0x06, 0x58, 0x36, 0x83, 0x01, 0x96, 0x52, 0x81, 0x00, 0xff, + 0xca, 0x82, 0x91, 0x8a, 0x5f, 0xa7, 0x4b, 0xf7, 0xdb, 0x69, 0x9d, 0xea, 0x71, 0xa6, 0xfb, 0xda, + 0xc4, 0x99, 0x7e, 0x0c, 0x7a, 0x2b, 0x7e, 0xbd, 0x43, 0xc0, 0xc2, 0xbf, 0x61, 0x41, 0x7f, 0xc5, + 0xaf, 0x1f, 0x83, 0x69, 0xe2, 0xc3, 0xa6, 0x69, 0xe2, 0x74, 0xce, 0xbc, 0xc9, 0xb1, 0x46, 0xfc, + 0xff, 0x3d, 0x30, 0x4c, 0xdb, 0xe9, 0x6f, 0xca, 0xa1, 0x34, 0xba, 0xcd, 0xea, 0xa2, 0xdb, 0xa8, + 0x14, 0xee, 0x37, 0x1a, 0xfe, 0x9d, 0xe4, 0xb0, 0x2e, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x2c, 0x0c, + 0x34, 0x03, 0xb2, 0xeb, 0xfa, 0x42, 0xbc, 0xd5, 0x0c, 0x3d, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e, + 0x4d, 0x43, 0xd7, 0xa3, 0x47, 0x79, 0xcd, 0xf7, 0xea, 0x5c, 0x7b, 0x5f, 0x14, 0xc9, 0x50, 0x34, + 0x38, 0x36, 0xa8, 0xd0, 0x6d, 0x28, 0xb1, 0xff, 0x6c, 0xdb, 0x39, 0x7c, 0x1a, 0x66, 0x91, 0x1e, + 0x52, 0x30, 0xc0, 0x31, 0x2f, 0xf4, 0x3c, 0x40, 0x24, 0x13, 0x0a, 0x84, 0x22, 0x70, 0x9d, 0xba, + 0x0a, 0xa8, 0x54, 0x03, 0x21, 0xd6, 0xa8, 0xd0, 0x33, 0x50, 0x8a, 0x1c, 0xb7, 0xb1, 0xec, 0x7a, + 0xcc, 0xfe, 0x4b, 0xdb, 0x2f, 0xb2, 0x34, 0x0a, 0x20, 0x8e, 0xf1, 0x54, 0x14, 0x63, 0x41, 0x4d, + 0x78, 0x12, 0xfa, 0x01, 0x46, 0xcd, 0x44, 0xb1, 0x65, 0x05, 0xc5, 0x1a, 0x05, 0xda, 0x82, 0xb3, + 0xae, 0xc7, 0x12, 0x87, 0x90, 0xea, 0xb6, 0xdb, 0x5c, 0x5b, 0xae, 0xde, 0x22, 0x81, 0xbb, 0xb1, + 0x37, 0xeb, 0xd4, 0xb6, 0x89, 0x27, 0x13, 0xec, 0xca, 0xbc, 0xeb, 0x67, 0x97, 0xda, 0xd0, 0xe2, + 0xb6, 0x9c, 0xec, 0x17, 0xd8, 0x7c, 0xbf, 0x51, 0x45, 0x4f, 0x1b, 0x5b, 0xc7, 0x29, 0x7d, 0xeb, + 0x38, 0xd8, 0x2f, 0xf7, 0xdd, 0xa8, 0x6a, 0x31, 0x39, 0x5e, 0x86, 0x93, 0x15, 0xbf, 0x5e, 0xf1, + 0x83, 0x68, 0xd1, 0x0f, 0xee, 0x38, 0x41, 0x5d, 0x4e, 0xaf, 0xb2, 0x8c, 0x4a, 0x42, 0xf7, 0xcf, + 0x5e, 0xbe, 0xbb, 0x18, 0x11, 0x47, 0x5e, 0x60, 0x12, 0xdb, 0x21, 0xdf, 0xd2, 0xd5, 0x98, 0xec, + 0xa0, 0x52, 0xef, 0x5c, 0x75, 0x22, 0x82, 0x6e, 0xb0, 0x14, 0xfa, 0xf1, 0x31, 0x2a, 0x8a, 0x3f, + 0xa5, 0xa5, 0xd0, 0x8f, 0x91, 0x99, 0xe7, 0xae, 0x59, 0xde, 0xfe, 0x9c, 0xa8, 0x84, 0xeb, 0x01, + 0xb8, 0xbf, 0x62, 0x37, 0x39, 0xa8, 0x65, 0x6e, 0x8e, 0x42, 0x7e, 0x52, 0x07, 0x6e, 0x79, 0x6d, + 0x9b, 0x9b, 0xc3, 0xfe, 0x4e, 0x38, 0x95, 0xac, 0xbe, 0xeb, 0x44, 0xd8, 0x73, 0x30, 0x1e, 0xe8, + 0x05, 0xb5, 0x44, 0x67, 0x27, 0x79, 0x3e, 0x85, 0x04, 0x12, 0xa7, 0xe9, 0xed, 0x97, 
0x60, 0x9c, + 0xde, 0x3d, 0x95, 0x20, 0xc7, 0x7a, 0xb9, 0x73, 0x78, 0x96, 0xff, 0xd8, 0xcb, 0x0e, 0xa2, 0x44, + 0xd6, 0x1b, 0xf4, 0x29, 0x18, 0x09, 0xc9, 0xb2, 0xeb, 0xb5, 0xee, 0x4a, 0xed, 0x53, 0x9b, 0x47, + 0xa4, 0xd5, 0x05, 0x9d, 0x92, 0xeb, 0xb0, 0x4d, 0x18, 0x4e, 0x70, 0x43, 0x3b, 0x30, 0x72, 0xc7, + 0xf5, 0xea, 0xfe, 0x9d, 0x50, 0xf2, 0x1f, 0xc8, 0x57, 0x65, 0xdf, 0xe6, 0x94, 0x89, 0x36, 0x1a, + 0xd5, 0xdd, 0x36, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0x96, 0x37, 0x13, 0xde, 0x0c, 0x09, + 0x7f, 0x16, 0x28, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, 0xae, 0x06, 0x7e, + 0x8b, 0xa7, 0x58, 0x11, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, 0xfe, 0xad, 0xfa, + 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x45, 0x98, 0x06, 0xc7, 0x06, 0x15, 0x5a, 0x04, 0x14, + 0xb6, 0x9a, 0xcd, 0x06, 0xf3, 0x4e, 0x73, 0x1a, 0x8c, 0x15, 0x77, 0xdb, 0x29, 0xf2, 0x10, 0xd1, + 0xd5, 0x14, 0x16, 0x67, 0x94, 0xa0, 0xe7, 0xe2, 0x86, 0x68, 0x6a, 0x2f, 0x6b, 0x2a, 0x37, 0x7b, + 0x55, 0x79, 0x3b, 0x25, 0x0e, 0x2d, 0x40, 0x7f, 0xb8, 0x17, 0xd6, 0xa2, 0x46, 0xd8, 0x2e, 0x21, + 0x5b, 0x95, 0x91, 0x68, 0xf9, 0x40, 0x79, 0x11, 0x2c, 0xcb, 0xa2, 0x1a, 0x4c, 0x08, 0x8e, 0x73, + 0x5b, 0x8e, 0xa7, 0xd2, 0x44, 0x71, 0x27, 0xfd, 0x2b, 0xf7, 0xf7, 0xcb, 0x13, 0xa2, 0x66, 0x1d, + 0x7d, 0xb0, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, 0xf9, 0x3b, + 0xcd, 0x4a, 0xe0, 0x6f, 0xb8, 0x0d, 0xd2, 0xce, 0x74, 0x58, 0x35, 0x28, 0xc5, 0xe4, 0x33, 0x60, + 0x38, 0xc1, 0xcd, 0xfe, 0x1c, 0x93, 0x1d, 0xab, 0xee, 0xa6, 0xe7, 0x44, 0xad, 0x80, 0xa0, 0x1d, + 0x18, 0x6e, 0xb2, 0xdd, 0x45, 0x24, 0x3e, 0x11, 0x73, 0xfd, 0xc5, 0x2e, 0xd5, 0x4f, 0x77, 0x58, + 0xea, 0x36, 0xc3, 0xd5, 0xad, 0xa2, 0xb3, 0xc3, 0x26, 0x77, 0xfb, 0x5f, 0x9c, 0x61, 0xd2, 0x47, + 0x95, 0xeb, 0x94, 0xfa, 0xc5, 0x9b, 0x20, 0x71, 0x8d, 0x9d, 0xca, 0x57, 0xb0, 0xc6, 0xc3, 0x22, + 0xde, 0x15, 0x61, 0x59, 0x16, 0x7d, 0x12, 0x46, 0xe8, 0xad, 0x50, 0x49, 0x00, 0xe1, 0xe4, 0x89, + 0xfc, 0xd8, 0x2d, 0x8a, 0x4a, 0x4f, 0x8a, 0xa4, 0x17, 0xc6, 0x09, 0x66, 0xe8, 0x0d, 0xe6, 0x5a, + 0x26, 0x59, 0x17, 0xba, 0x61, 0xad, 0x7b, 0x91, 0x49, 0xb6, 0x1a, 0x13, 0xd4, 0x82, 0x89, 0x74, + 0xea, 0xc7, 0x70, 0xd2, 0xce, 0x17, 0xaf, 0xd3, 0xd9, 0x1b, 0xe3, 0xec, 0x35, 0x69, 0x5c, 0x88, + 0xb3, 0xf8, 0xa3, 0xe5, 0x64, 0x62, 0xbe, 0xa2, 0xa1, 0xf7, 0x4d, 0x25, 0xe7, 0x1b, 0x6e, 0x9b, + 0x93, 0x6f, 0x13, 0xce, 0x69, 0xb9, 0xcd, 0xae, 0x06, 0x0e, 0x73, 0xde, 0x70, 0xd9, 0x76, 0xaa, + 0xc9, 0x45, 0x8f, 0xde, 0xdf, 0x2f, 0x9f, 0x5b, 0x6b, 0x47, 0x88, 0xdb, 0xf3, 0x41, 0x37, 0xe0, + 0x24, 0x8f, 0x3c, 0x30, 0x4f, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xe0, 0xc5, 0x97, 0xfc, 0x99, 0xfb, + 0xfb, 0xe5, 0x93, 0x33, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x30, 0x94, 0xea, 0x5e, 0x28, 0xfa, + 0xa0, 0xcf, 0x48, 0x1f, 0x57, 0x9a, 0x5f, 0xad, 0xaa, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda, + 0xe4, 0xb6, 0x01, 0xa5, 0x2d, 0xea, 0x4f, 0x45, 0x5e, 0x4b, 0x2a, 0x54, 0x8d, 0xb7, 0xc7, 0xdc, + 0x28, 0xa6, 0x9e, 0xe4, 0x18, 0xcf, 0x92, 0x0d, 0xc6, 0xe8, 0x75, 0x40, 0x22, 0x4d, 0xc1, 0x4c, + 0x8d, 0x65, 0xd5, 0x61, 0x47, 0xe3, 0x80, 0xf9, 0x1a, 0xb6, 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0, + 0x35, 0xba, 0xab, 0xe8, 0x50, 0xb1, 0x6b, 0xa9, 0x24, 0xa5, 0xf3, 0xa4, 0x19, 0x10, 0xe6, 0x63, + 0x66, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0x67, 0x9d, 0x56, 0xe4, 0x33, 0xb3, 0x8b, 0x49, 0xba, + 0xe6, 0x6f, 0x13, 0x8f, 0x59, 0x3c, 0x07, 0x66, 0x2f, 0x50, 0xc9, 0x6e, 0xa6, 0x0d, 0x1d, 0x6e, + 0xcb, 0x85, 0x4a, 0xe4, 0x2a, 0x2b, 0x39, 0x98, 0xf1, 0xe4, 0x32, 0x32, 0x93, 0xbf, 0x04, 0x83, + 0x5b, 0x7e, 
0x18, 0xad, 0x92, 0xe8, 0x8e, 0x1f, 0x6c, 0x8b, 0xb8, 0xc8, 0x71, 0x2c, 0xfa, 0x18, + 0x85, 0x75, 0x3a, 0x7a, 0xe5, 0x66, 0xfe, 0x38, 0x4b, 0xf3, 0xcc, 0x15, 0x62, 0x20, 0xde, 0x63, + 0xae, 0x71, 0x30, 0x96, 0x78, 0x49, 0xba, 0x54, 0x99, 0x63, 0x6e, 0x0d, 0x09, 0xd2, 0xa5, 0xca, + 0x1c, 0x96, 0x78, 0x3a, 0x5d, 0xc3, 0x2d, 0x27, 0x20, 0x95, 0xc0, 0xaf, 0x91, 0x50, 0xcb, 0x80, + 0xf0, 0x08, 0x8f, 0xfa, 0x4c, 0xa7, 0x6b, 0x35, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x24, 0x9d, 0xd7, + 0x6f, 0x24, 0xdf, 0x1e, 0x95, 0x96, 0x67, 0xba, 0x4c, 0xed, 0xe7, 0xc1, 0x98, 0xca, 0x28, 0xc8, + 0xe3, 0x3c, 0x87, 0x93, 0xa3, 0x6c, 0x6e, 0x77, 0x1f, 0x24, 0x5a, 0x59, 0xf8, 0x96, 0x12, 0x9c, + 0x70, 0x8a, 0xb7, 0x11, 0x32, 0x70, 0xac, 0x63, 0xc8, 0xc0, 0xcb, 0x50, 0x0a, 0x5b, 0xeb, 0x75, + 0x7f, 0xc7, 0x71, 0x3d, 0xe6, 0xd6, 0xa0, 0xdd, 0xfd, 0xaa, 0x12, 0x81, 0x63, 0x1a, 0xb4, 0x08, + 0x03, 0x8e, 0x34, 0xdf, 0xa1, 0xfc, 0x20, 0x51, 0xca, 0x68, 0xc7, 0xe3, 0xa6, 0x48, 0x83, 0x9d, + 0x2a, 0x8b, 0x5e, 0x85, 0x61, 0xf1, 0x72, 0x5e, 0x24, 0xe1, 0x9d, 0x30, 0x9f, 0x37, 0x56, 0x75, + 0x24, 0x36, 0x69, 0xd1, 0x4d, 0x18, 0x8c, 0xfc, 0x06, 0x7b, 0xa3, 0x47, 0xc5, 0xbc, 0x53, 0xf9, + 0xe1, 0x0e, 0xd7, 0x14, 0x99, 0xae, 0xb5, 0x56, 0x45, 0xb1, 0xce, 0x07, 0xad, 0xf1, 0xf9, 0xce, + 0xf2, 0x1d, 0x90, 0x50, 0x64, 0x71, 0x3d, 0x97, 0xe7, 0x93, 0xc6, 0xc8, 0xcc, 0xe5, 0x20, 0x4a, + 0x62, 0x9d, 0x0d, 0xba, 0x0a, 0xe3, 0xcd, 0xc0, 0xf5, 0xd9, 0x9c, 0x50, 0x96, 0xdb, 0x49, 0x33, + 0xbb, 0x59, 0x25, 0x49, 0x80, 0xd3, 0x65, 0x58, 0xe0, 0x03, 0x01, 0x9c, 0x3c, 0xc3, 0x33, 0xb4, + 0xf0, 0xab, 0x34, 0x87, 0x61, 0x85, 0x45, 0x2b, 0x6c, 0x27, 0xe6, 0x5a, 0xa0, 0xc9, 0xa9, 0xfc, + 0xb8, 0x54, 0xba, 0xb6, 0x88, 0x0b, 0xaf, 0xea, 0x2f, 0x8e, 0x39, 0xa0, 0xba, 0x96, 0x18, 0x95, + 0x5e, 0x01, 0xc2, 0xc9, 0xb3, 0x6d, 0x9c, 0x22, 0x13, 0xb7, 0xb2, 0x58, 0x20, 0x30, 0xc0, 0x21, + 0x4e, 0xf0, 0x44, 0x1f, 0x85, 0x31, 0x11, 0x4d, 0x33, 0xee, 0xa6, 0x73, 0xf1, 0xcb, 0x07, 0x9c, + 0xc0, 0xe1, 0x14, 0x35, 0xcf, 0x90, 0xe2, 0xac, 0x37, 0x88, 0xd8, 0xfa, 0x96, 0x5d, 0x6f, 0x3b, + 0x9c, 0x3c, 0xcf, 0xf6, 0x07, 0x91, 0x21, 0x25, 0x89, 0xc5, 0x19, 0x25, 0xd0, 0x1a, 0x8c, 0x35, + 0x03, 0x42, 0x76, 0x98, 0xa0, 0x2f, 0xce, 0xb3, 0x32, 0x8f, 0xfb, 0x41, 0x5b, 0x52, 0x49, 0xe0, + 0x0e, 0x32, 0x60, 0x38, 0xc5, 0x01, 0xdd, 0x81, 0x01, 0x7f, 0x97, 0x04, 0x5b, 0xc4, 0xa9, 0x4f, + 0x5e, 0x68, 0xf3, 0x12, 0x47, 0x1c, 0x6e, 0x37, 0x04, 0x6d, 0xc2, 0xdb, 0x43, 0x82, 0x3b, 0x7b, + 0x7b, 0xc8, 0xca, 0xd0, 0xff, 0x61, 0xc1, 0x19, 0x69, 0x9c, 0xa9, 0x36, 0x69, 0xaf, 0xcf, 0xf9, + 0x5e, 0x18, 0x05, 0x3c, 0x52, 0xc5, 0xa3, 0xf9, 0xd1, 0x1b, 0xd6, 0x72, 0x0a, 0x29, 0x45, 0xf4, + 0x99, 0x3c, 0x8a, 0x10, 0xe7, 0xd7, 0x48, 0xaf, 0xa6, 0x21, 0x89, 0xe4, 0x66, 0x34, 0x13, 0x2e, + 0xbe, 0x31, 0xbf, 0x3a, 0xf9, 0x18, 0x0f, 0xb3, 0x41, 0x17, 0x43, 0x35, 0x89, 0xc4, 0x69, 0x7a, + 0x74, 0x05, 0x0a, 0x7e, 0x38, 0xf9, 0x78, 0x9b, 0x5c, 0xba, 0x7e, 0xfd, 0x46, 0x95, 0x7b, 0xfd, + 0xdd, 0xa8, 0xe2, 0x82, 0x1f, 0xca, 0x2c, 0x25, 0xf4, 0x3e, 0x16, 0x4e, 0x3e, 0xc1, 0xd5, 0x96, + 0x32, 0x4b, 0x09, 0x03, 0xe2, 0x18, 0x8f, 0xb6, 0x60, 0x34, 0x34, 0xee, 0xbd, 0xe1, 0xe4, 0x45, + 0xd6, 0x53, 0x4f, 0xe4, 0x0d, 0x9a, 0x41, 0xad, 0xa5, 0x0f, 0x30, 0xb9, 0xe0, 0x24, 0x5b, 0xbe, + 0xba, 0xb4, 0x9b, 0x77, 0x38, 0xf9, 0x64, 0x87, 0xd5, 0xa5, 0x11, 0xeb, 0xab, 0x4b, 0xe7, 0x81, + 0x13, 0x3c, 0xa7, 0xbe, 0x03, 0xc6, 0x53, 0xe2, 0xd2, 0x61, 0x3c, 0xdc, 0xa7, 0xb6, 0x61, 0xd8, + 0x98, 0x92, 0x0f, 0xd5, 0xbb, 0xe2, 0xb7, 0x4b, 0x50, 0x52, 0x56, 0x6f, 0x74, 0xd9, 0x74, 0xa8, + 0x38, 0x93, 0x74, 0xa8, 0x18, 0xa8, 
0xf8, 0x75, 0xc3, 0x87, 0x62, 0x2d, 0x23, 0x18, 0x63, 0xde, + 0x06, 0xd8, 0xfd, 0x23, 0x15, 0xcd, 0x94, 0x50, 0xec, 0xda, 0x33, 0xa3, 0xa7, 0xad, 0x75, 0xe2, + 0x2a, 0x8c, 0x7b, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, 0xd1, 0x8d, + 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x39, 0x84, 0xcb, 0x51, 0x58, 0x60, + 0xe9, 0xdd, 0x90, 0xff, 0x0a, 0x27, 0xc7, 0xf2, 0xef, 0x86, 0xbc, 0x50, 0x52, 0x18, 0x0b, 0xa5, + 0x30, 0xc6, 0xb4, 0xff, 0x4d, 0xbf, 0xbe, 0x54, 0x11, 0x62, 0xbe, 0x16, 0x49, 0xb8, 0xbe, 0x54, + 0xc1, 0x1c, 0x87, 0x66, 0xa0, 0x8f, 0xfd, 0x08, 0x27, 0x87, 0xf2, 0xa3, 0xe1, 0xb0, 0x12, 0x5a, + 0x96, 0x34, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0xbb, 0x4b, 0xef, 0x46, 0x4c, 0xbb, 0xdb, 0xff, 0x80, + 0xda, 0x5d, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x5d, 0x38, 0x69, 0xdc, 0x47, 0xd5, 0xab, 0x1d, 0xc8, + 0x37, 0xfc, 0x26, 0x88, 0x67, 0xcf, 0x89, 0x46, 0x9f, 0x5c, 0xca, 0xe2, 0x84, 0xb3, 0x2b, 0x40, + 0x0d, 0x18, 0xaf, 0xa5, 0x6a, 0x1d, 0xe8, 0xbe, 0x56, 0x35, 0x2f, 0xd2, 0x35, 0xa6, 0x19, 0xa3, + 0x57, 0x61, 0xe0, 0x6d, 0x3f, 0x64, 0x47, 0xa4, 0xb8, 0x9a, 0xc8, 0x70, 0x0e, 0x03, 0x6f, 0xdc, + 0xa8, 0x32, 0xf8, 0xc1, 0x7e, 0x79, 0xb0, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x0a, 0xa0, 0xef, 0xb7, + 0x60, 0x2a, 0x7d, 0xe1, 0x55, 0x8d, 0x1e, 0xee, 0xbe, 0xd1, 0xb6, 0xa8, 0x74, 0x6a, 0x21, 0x97, + 0x1d, 0x6e, 0x53, 0x15, 0xfa, 0x10, 0x5d, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x14, 0xb3, 0x8f, 0xc6, + 0xeb, 0x89, 0x42, 0x0f, 0xf6, 0xcb, 0xa3, 0x7c, 0x67, 0x74, 0xef, 0xc9, 0xe7, 0x4d, 0xa2, 0x00, + 0xfa, 0x4e, 0x38, 0x19, 0xa4, 0x35, 0xa8, 0x44, 0x0a, 0xe1, 0x4f, 0x77, 0xb3, 0xcb, 0x26, 0x07, + 0x1c, 0x67, 0x31, 0xc4, 0xd9, 0xf5, 0xd8, 0xbf, 0x62, 0x31, 0xfd, 0xb6, 0x68, 0x16, 0x09, 0x5b, + 0x8d, 0xe3, 0x48, 0x6c, 0xbd, 0x60, 0xd8, 0x8e, 0x1f, 0xd8, 0xb1, 0xe8, 0x1f, 0x59, 0xcc, 0xb1, + 0xe8, 0x18, 0x5f, 0x31, 0xbd, 0x01, 0x03, 0x91, 0x4c, 0x38, 0xde, 0x26, 0x17, 0xb7, 0xd6, 0x28, + 0xe6, 0x5c, 0xa5, 0x2e, 0x39, 0x2a, 0xb7, 0xb8, 0x62, 0x63, 0xff, 0x7d, 0x3e, 0x02, 0x12, 0x73, + 0x0c, 0x26, 0xba, 0x79, 0xd3, 0x44, 0x57, 0xee, 0xf0, 0x05, 0x39, 0xa6, 0xba, 0xbf, 0x67, 0xb6, + 0x9b, 0x29, 0xf7, 0xde, 0xed, 0x1e, 0x6d, 0xf6, 0x17, 0x2c, 0x80, 0x38, 0xc8, 0x7c, 0x17, 0x29, + 0x25, 0x5f, 0xa6, 0xd7, 0x1a, 0x3f, 0xf2, 0x6b, 0x7e, 0x43, 0x18, 0x28, 0xce, 0xc6, 0x56, 0x42, + 0x0e, 0x3f, 0xd0, 0x7e, 0x63, 0x45, 0x8d, 0xca, 0x32, 0xa4, 0x65, 0x31, 0xb6, 0x5b, 0x1b, 0xe1, + 0x2c, 0xbf, 0x64, 0xc1, 0x89, 0x2c, 0x97, 0x78, 0x7a, 0x49, 0xe6, 0x6a, 0x4e, 0xe5, 0x6d, 0xa8, + 0x46, 0xf3, 0x96, 0x80, 0x63, 0x45, 0xd1, 0x75, 0xae, 0xce, 0xc3, 0x45, 0x77, 0xbf, 0x01, 0xc3, + 0x95, 0x80, 0x68, 0xf2, 0xc5, 0x6b, 0x3c, 0x4c, 0x0a, 0x6f, 0xcf, 0xb3, 0x87, 0x0e, 0x91, 0x62, + 0x7f, 0xb9, 0x00, 0x27, 0xb8, 0xd3, 0xce, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x5d, 0x3c, 0x64, + 0x7c, 0x13, 0x86, 0x9a, 0x9a, 0x6e, 0xba, 0x5d, 0xa4, 0x62, 0x5d, 0x87, 0x1d, 0x6b, 0xd3, 0x74, + 0x28, 0x36, 0x78, 0xa1, 0x3a, 0x0c, 0x91, 0x5d, 0xb7, 0xa6, 0x3c, 0x3f, 0x0a, 0x87, 0x3e, 0xa4, + 0x55, 0x2d, 0x0b, 0x1a, 0x1f, 0x6c, 0x70, 0x7d, 0x08, 0x19, 0xf4, 0xed, 0x1f, 0xb5, 0xe0, 0x74, + 0x4e, 0x5c, 0x63, 0x5a, 0xdd, 0x1d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61, + 0x81, 0x45, 0x1f, 0x03, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x00, 0x6b, 0xc4, 0xae, 0xd4, + 0xc2, 0x10, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xa9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40, + 0xff, 0x16, 0xcf, 0x54, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x7e, 0x15, 0x8f, 0x9b, 0x06, 0xc5, + 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x30, 0xac, 0x31, 0x4f, 
0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2, + 0x1c, 0xd8, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1, + 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x30, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 0x41, 0x8d, 0x5e, + 0x85, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2, + 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6, + 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa, + 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd, + 0xd3, 0x05, 0x30, 0x86, 0xf6, 0xdb, 0x37, 0x85, 0x19, 0xfd, 0xb2, 0xcd, 0xa0, 0x59, 0x13, 0x0e, + 0x65, 0x99, 0x5f, 0x16, 0xe7, 0x2f, 0xe6, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8a, 0xae, 0xf1, 0x93, + 0x95, 0xc0, 0xa7, 0x87, 0x9c, 0x0c, 0xa4, 0xa7, 0x1e, 0x9f, 0xf4, 0xcb, 0x20, 0x03, 0x6d, 0x42, + 0xce, 0x0a, 0xf7, 0x7c, 0xce, 0xc1, 0xf0, 0xbd, 0xaa, 0x8a, 0x68, 0x1f, 0x92, 0x0b, 0xba, 0x02, + 0x83, 0x22, 0x2f, 0x15, 0x7b, 0x23, 0xc1, 0x17, 0x13, 0xf3, 0x15, 0x9b, 0x8f, 0xc1, 0x58, 0xa7, + 0xb1, 0x7f, 0xa0, 0x00, 0x13, 0x19, 0x8f, 0xdc, 0xf8, 0x31, 0xb2, 0xe9, 0x86, 0x91, 0x4a, 0x91, + 0xac, 0x1d, 0x23, 0x1c, 0x8e, 0x15, 0x05, 0xdd, 0xab, 0xf8, 0x41, 0x95, 0x3c, 0x9c, 0xc4, 0x23, + 0x12, 0x81, 0x3d, 0x64, 0xb2, 0xe1, 0x0b, 0xd0, 0xd3, 0x0a, 0x89, 0x0c, 0x16, 0xad, 0x8e, 0x6d, + 0x66, 0xd6, 0x66, 0x18, 0x7a, 0x05, 0xdc, 0x54, 0x16, 0x62, 0xed, 0x0a, 0xc8, 0x6d, 0xc4, 0x1c, + 0x47, 0x1b, 0x17, 0x11, 0xcf, 0xf1, 0x22, 0x71, 0x51, 0x8c, 0xa3, 0x9e, 0x32, 0x28, 0x16, 0x58, + 0xfb, 0x8b, 0x45, 0x38, 0x93, 0xfb, 0xec, 0x95, 0x36, 0x7d, 0xc7, 0xf7, 0xdc, 0xc8, 0x57, 0x4e, + 0x78, 0x3c, 0xd2, 0x29, 0x69, 0x6e, 0xad, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x22, 0xf4, 0x32, 0xa5, + 0x78, 0x2a, 0x59, 0xf4, 0xec, 0x3c, 0x0f, 0x7d, 0xc7, 0xd1, 0x5d, 0xe7, 0xf7, 0x7f, 0x8c, 0x4a, + 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 0x7e, 0x03, 0x33, 0x24, 0x7a, 0x42, 0xf4, 0x57, + 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0x7b, 0x81, 0xeb, + 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x73, 0x30, 0x96, 0x78, 0x33, 0x6f, 0x69, 0xff, 0x51, 0x27, 0xe6, + 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, 0x71, 0x33, 0x3d, + 0x10, 0x47, 0x9d, 0x98, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0x3b, 0x96, 0x88, 0x59, + 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x83, 0xde, 0x80, 0x56, 0x9a, 0xcc, 0x12, 0xcd, 0x5a, + 0x82, 0x39, 0x0e, 0x9d, 0x85, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, 0x9e, 0x77, 0x22, + 0x07, 0x33, 0x28, 0x0b, 0xfc, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, 0xef, 0x8e, + 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0x81, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, 0xff, 0x79, 0x01, + 0xce, 0x67, 0x96, 0xeb, 0x3a, 0xf0, 0x5b, 0xfb, 0xd2, 0x0f, 0x33, 0xff, 0x51, 0xf1, 0x18, 0x7d, + 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xc7, 0x96, 0xd9, 0x65, 0xef, 0x92, 0x78, 0x6c, + 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x12, 0xdd, 0x67, + 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, 0xa3, 0x3b, 0xae, + 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, 0xf4, 0xc8, 0xd5, + 0x62, 0xb5, 0xf1, 0xaf, 0x7b, 0xf5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, 0xbd, 0x98, 0x11, + 0xb7, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, 0xa6, 0x5e, 0x85, + 0xe1, 0x07, 0xb6, 0x8d, 0xd8, 0x5f, 0x2f, 0xc2, 0x23, 0x6d, 0x96, 0x3d, 0xdf, 0xeb, 
0x8d, 0x31, + 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, 0x4f, 0xa0, 0x48, + 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, 0x92, 0xf4, 0x8a, + 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x2a, 0x8c, + 0x3b, 0xbb, 0x8e, 0xcb, 0x03, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, 0x33, 0x49, 0x02, + 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0xab, 0xc4, 0x13, 0x56, 0x77, + 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, 0xc9, 0xfa, 0xf2, + 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0xa6, 0xde, 0xba, 0x02, 0xc3, 0x87, 0x74, 0xff, 0xb5, 0xff, + 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x46, 0xfd, 0x7d, 0x95, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0x2d, + 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a, + 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x0e, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, 0xec, 0xd0, + 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37, + 0x5a, 0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xd5, 0x38, 0xc9, 0x9f, 0x68, 0x17, 0x9f, 0x91, 0x35, + 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xcc, 0xaa, 0xfd, 0xc9, 0xfd, 0x0f, 0x2c, + 0x18, 0x37, 0xe8, 0x8f, 0xe1, 0x00, 0x59, 0x34, 0x0f, 0x90, 0x47, 0x3b, 0x7e, 0x43, 0xce, 0xc1, + 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x36, 0xf4, 0x6c, 0x39, 0x41, 0xbd, 0x5d, 0x3e, + 0x9a, 0x54, 0xa1, 0xe9, 0x6b, 0x4e, 0x20, 0x3c, 0x15, 0x9e, 0x95, 0xbd, 0x4e, 0x41, 0x1d, 0xbd, + 0x14, 0x58, 0x55, 0xe8, 0x65, 0xe8, 0x0b, 0x6b, 0x7e, 0x53, 0xbd, 0x99, 0xba, 0xc0, 0x3a, 0x9a, + 0x41, 0x0e, 0xf6, 0xcb, 0xc8, 0xac, 0x8e, 0x82, 0xb1, 0xa0, 0x47, 0x6f, 0xc2, 0x30, 0xfb, 0xa5, + 0xdc, 0x06, 0x8b, 0xf9, 0x1a, 0x8c, 0xaa, 0x4e, 0xc8, 0x7d, 0x6a, 0x0d, 0x10, 0x36, 0x59, 0x4d, + 0x6d, 0x42, 0x49, 0x7d, 0xd6, 0x43, 0xb5, 0x76, 0xff, 0xab, 0x22, 0x4c, 0x64, 0xcc, 0x39, 0x14, + 0x1a, 0x23, 0x71, 0xa5, 0xcb, 0xa9, 0xfa, 0x0e, 0xc7, 0x22, 0x64, 0x17, 0xa8, 0xba, 0x98, 0x5b, + 0x5d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x73, 0xa5, 0xb4, 0xb2, 0x63, 0xeb, 0x6a, + 0x5a, 0x91, 0x6a, 0xe9, 0x43, 0x1d, 0xd3, 0x5f, 0xef, 0x81, 0x13, 0x59, 0x21, 0x63, 0xd1, 0x67, + 0x13, 0xd9, 0x90, 0x5f, 0xec, 0x36, 0xd8, 0x2c, 0x4f, 0x91, 0x2c, 0xc2, 0x40, 0x4e, 0x9b, 0xf9, + 0x91, 0x3b, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0xa0, 0x09, 0x78, 0x16, 0x6b, 0xb9, 0x7d, 0x7c, 0xa0, + 0xeb, 0x06, 0x88, 0xf4, 0xd7, 0x61, 0xc2, 0x25, 0x49, 0x82, 0x3b, 0xbb, 0x24, 0xc9, 0x9a, 0xd1, + 0x12, 0xf4, 0xd5, 0xb8, 0xaf, 0x4b, 0xb1, 0xf3, 0x16, 0xc6, 0x1d, 0x5d, 0xd4, 0x06, 0x2c, 0x1c, + 0x5c, 0x04, 0x83, 0x29, 0x17, 0x06, 0xb5, 0x8e, 0x79, 0xa8, 0x93, 0x67, 0x9b, 0x1e, 0x7c, 0x5a, + 0x17, 0x3c, 0xd4, 0x09, 0xf4, 0xa3, 0x16, 0x24, 0x1e, 0xbc, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94, + 0xbb, 0x00, 0x3d, 0x81, 0xdf, 0x20, 0xc9, 0x0c, 0xc4, 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22, + 0x8a, 0x55, 0x2d, 0x43, 0xfa, 0x35, 0x52, 0x5c, 0x10, 0x1f, 0x83, 0xde, 0x06, 0xd9, 0x25, 0x8d, + 0x64, 0xa2, 0xb8, 0x65, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x62, 0x0f, 0x9c, 0x6b, 0x1b, 0x0d, 0x8a, + 0x5e, 0xc6, 0x36, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x92, 0x19, 0x9d, 0xae, 0x72, 0x30, 0x96, 0x78, + 0xf6, 0xfc, 0x93, 0x27, 0x66, 0x48, 0xa8, 0x30, 0x45, 0x3e, 0x06, 0x81, 0x35, 0x55, 0x62, 0xc5, + 0xa3, 0x50, 0x89, 0x3d, 0x0f, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, 0xef, 0x4a, 0xe3, 0x04, + 0x1e, 0xd5, 0x65, 0x81, 0xc1, 0x1a, 0x15, 0x9a, 0x87, 0xb1, 0x66, 0xe0, 0x47, 0x5c, 0x23, 0x3c, + 0xcf, 0x3d, 
0x67, 0x7b, 0xcd, 0x40, 0x3c, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x97, 0x60, 0x50, + 0x04, 0xe7, 0xa9, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, 0x46, 0x61, 0x9d, 0x4e, + 0x2b, 0xc6, 0xd4, 0xcc, 0xfd, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x44, 0xa2, 0x1e, 0xe8, + 0x2a, 0x12, 0x75, 0xac, 0x96, 0x2b, 0x75, 0x6d, 0xf5, 0x84, 0x8e, 0x8a, 0xac, 0xaf, 0xf4, 0xc0, + 0x84, 0x98, 0x38, 0x0f, 0x7b, 0xba, 0xdc, 0x4c, 0x4f, 0x97, 0xa3, 0x50, 0xdc, 0xbd, 0x37, 0x67, + 0x8e, 0x7b, 0xce, 0xfc, 0xb0, 0x05, 0xa6, 0xa4, 0x86, 0xfe, 0xb7, 0xdc, 0x94, 0x78, 0x2f, 0xe5, + 0x4a, 0x7e, 0x71, 0x94, 0xdf, 0x77, 0x96, 0x1c, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xda, 0x91, 0x23, + 0x5a, 0x80, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb, + 0xb8, 0x24, 0x5a, 0x48, 0xe5, 0x1e, 0x7c, 0x2a, 0x23, 0xf7, 0xe0, 0x49, 0xa3, 0x7b, 0x1e, 0x30, + 0xf9, 0xe0, 0x0f, 0xd2, 0x13, 0xc7, 0x78, 0xd5, 0x86, 0x3e, 0x60, 0x28, 0x1d, 0xed, 0x84, 0xd2, + 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x51, 0x18, 0x63, 0x51, 0xfb, 0xd8, 0x3b, 0x0f, 0xf1, 0xde, + 0xae, 0x10, 0xfb, 0x72, 0x2f, 0x27, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x69, 0x11, 0xfa, 0xf8, 0xf2, + 0x3b, 0x86, 0xeb, 0xe5, 0x33, 0x50, 0x72, 0x77, 0x76, 0x5a, 0x3c, 0x9d, 0x5c, 0x6f, 0xec, 0x19, + 0xbc, 0x24, 0x81, 0x38, 0xc6, 0xa3, 0x45, 0xa1, 0xef, 0x6e, 0x13, 0x18, 0x98, 0x37, 0x7c, 0x7a, + 0xde, 0x89, 0x1c, 0x2e, 0x2b, 0xa9, 0x73, 0x36, 0xd6, 0x8c, 0xa3, 0x4f, 0x01, 0x84, 0x51, 0xe0, + 0x7a, 0x9b, 0x14, 0x26, 0x62, 0xab, 0x3f, 0xdd, 0x86, 0x5b, 0x55, 0x11, 0x73, 0x9e, 0xf1, 0x9e, + 0xa3, 0x10, 0x58, 0xe3, 0x88, 0xa6, 0x8d, 0x93, 0x7e, 0x2a, 0x31, 0x76, 0xc0, 0xb9, 0xc6, 0x63, + 0x36, 0xf5, 0x41, 0x28, 0x29, 0xe6, 0x9d, 0xb4, 0x5f, 0x43, 0xba, 0x58, 0xf4, 0x11, 0x18, 0x4d, + 0xb4, 0xed, 0x50, 0xca, 0xb3, 0x5f, 0xb2, 0x60, 0x94, 0x37, 0x66, 0xc1, 0xdb, 0x15, 0xa7, 0xc1, + 0x3d, 0x38, 0xd1, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0x77, 0xbf, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16, + 0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, 0xa7, 0x21, 0xe2, 0x1b, 0x0c, 0xf1, 0xd5, + 0xc6, 0x61, 0x58, 0x61, 0xed, 0x3f, 0xb0, 0x60, 0x9c, 0xb7, 0xfc, 0x3a, 0xd9, 0x53, 0x7b, 0xd3, + 0x37, 0xb3, 0xed, 0x22, 0x91, 0x69, 0x21, 0x27, 0x91, 0xa9, 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, + 0x65, 0x0b, 0xc4, 0x0c, 0x39, 0x06, 0x7d, 0xc6, 0x77, 0x98, 0xfa, 0x8c, 0xa9, 0xfc, 0x45, 0x90, + 0xa3, 0xc8, 0xf8, 0x2b, 0x0b, 0xc6, 0x38, 0x41, 0x6c, 0xab, 0xff, 0xa6, 0x8e, 0xc3, 0xac, 0xf9, + 0x45, 0x99, 0xce, 0x97, 0xd7, 0xc9, 0xde, 0x9a, 0x5f, 0x71, 0xa2, 0xad, 0xec, 0x8f, 0x32, 0x06, + 0xab, 0xa7, 0xed, 0x60, 0xd5, 0xe5, 0x02, 0x32, 0xf2, 0x7c, 0x75, 0x08, 0x10, 0x70, 0xd8, 0x3c, + 0x5f, 0xf6, 0x9f, 0x59, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, 0x06, 0xd5, 0x0e, 0xba, + 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x49, 0xf7, 0x24, 0x1c, 0x2e, 0x8a, 0x9d, 0x1d, 0x2e, + 0x0e, 0xd1, 0xa3, 0xff, 0xac, 0x0f, 0x92, 0x2f, 0xfb, 0xd0, 0x2d, 0x18, 0xaa, 0x39, 0x4d, 0x67, + 0xdd, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x76, 0xde, 0x58, 0x73, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10, + 0x6c, 0xf0, 0x41, 0xd3, 0x00, 0xcd, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x93, 0xa9, 0x5d, 0x58, 0x44, + 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc8, 0x61, 0x14, 0xe0, + 0xd8, 0xc2, 0x28, 0xf4, 0x1c, 0x2a, 0x8c, 0xc2, 0xc0, 0xa1, 0xc3, 0x28, 0xf4, 0x76, 0x15, 0x46, + 0x01, 0xc3, 0x29, 0x29, 0x7b, 0xd2, 0xff, 0x8b, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33, + 0x75, 0x7f, 0xbf, 0x7c, 0x0a, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0x63, 0x30, 0xe9, 0x34, 0x1a, + 0xfe, 0x1d, 0x35, 0xa8, 0x0b, 0x61, 
0xcd, 0x69, 0x70, 0x13, 0x48, 0x3f, 0xe3, 0x7a, 0xf6, 0xfe, + 0x7e, 0x79, 0x72, 0x26, 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x86, 0x52, 0x33, 0xf0, 0x6b, 0x2b, + 0xda, 0xf3, 0xe3, 0xf3, 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x56, 0x7f, 0xd8, 0x81, + 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x78, 0xa4, 0x71, 0x11, 0xb6, 0x61, 0xa2, 0x4a, 0x02, 0xd7, + 0x69, 0xb8, 0xf7, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x35, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xab, 0x60, + 0xbd, 0x5a, 0xc2, 0x25, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0x41, 0xbf, 0x78, 0xc9, 0x77, + 0x0c, 0x52, 0xe3, 0x8c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x29, + 0x61, 0x8e, 0x78, 0xb4, 0x1d, 0x93, 0xf6, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, 0x4d, + 0xf9, 0xc3, 0xef, 0x82, 0x55, 0xe8, 0x0f, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, 0x20, + 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x10, 0x1f, 0x4b, 0x77, + 0x7a, 0x75, 0xdf, 0x73, 0x14, 0xaf, 0xee, 0xed, 0xaf, 0xb1, 0x93, 0x53, 0x87, 0x1f, 0x83, 0x50, + 0x75, 0xd5, 0x3c, 0x63, 0xed, 0x36, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x05, 0x0b, 0xce, + 0x65, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x0b, 0x03, 0x4e, 0xab, 0xee, 0xaa, 0xb5, 0xac, 0x99, 0x26, + 0x67, 0x04, 0x1c, 0x2b, 0x0a, 0x34, 0x07, 0xe3, 0xe4, 0x6e, 0xd3, 0xe5, 0x86, 0x5c, 0xdd, 0xf9, + 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x24, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, 0x44, + 0xfd, 0xbc, 0x05, 0x83, 0xea, 0x55, 0xef, 0x43, 0xef, 0xed, 0x8f, 0x9a, 0xbd, 0xfd, 0x48, 0x9b, + 0xde, 0xce, 0xe9, 0xe6, 0xdf, 0x2b, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x42, 0x82, 0x7b, 0xf0, + 0x87, 0x13, 0x57, 0x60, 0xd0, 0x69, 0x36, 0x25, 0x42, 0x7a, 0xc0, 0xb1, 0xd0, 0xeb, 0x31, 0x18, + 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x4d, 0x12, 0x51, + 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0x5a, 0x91, 0xdb, 0x98, 0x76, 0xbd, 0x28, 0x8c, 0x82, 0xe9, + 0x25, 0x2f, 0xba, 0x11, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xc1, + 0x82, 0xd5, 0xd1, 0x6b, 0xba, 0x52, 0xac, 0x0a, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, 0xc3, + 0xfa, 0xf4, 0x70, 0xe1, 0xc5, 0x7e, 0x72, 0x48, 0x8d, 0x06, 0x33, 0x8a, 0xce, 0xeb, 0x41, 0xcc, + 0xda, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x22, 0xe5, 0x1e, 0xf3, + 0x5c, 0x87, 0x53, 0xe3, 0x10, 0x0e, 0x31, 0x2c, 0x0f, 0x13, 0xcb, 0x52, 0xb3, 0x54, 0x11, 0xeb, + 0x42, 0xcb, 0xc3, 0x24, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa2, 0x38, 0x16, + 0xb0, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x0b, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x92, 0x50, 0x28, + 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x57, 0x60, 0x90, 0xdc, 0x8d, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43, + 0x6f, 0x1c, 0x3f, 0x73, 0x21, 0x06, 0x63, 0x9d, 0x06, 0xad, 0xc1, 0x68, 0xc8, 0xf5, 0x6c, 0x2a, + 0x48, 0x3c, 0xd7, 0x57, 0x3e, 0xad, 0xde, 0x53, 0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c, + 0x32, 0x91, 0x64, 0x81, 0x5e, 0x83, 0x91, 0x86, 0xef, 0xd4, 0x67, 0x9d, 0x86, 0xe3, 0xd5, 0x58, + 0xff, 0x0c, 0x98, 0x89, 0xa8, 0x97, 0x0d, 0x2c, 0x4e, 0x50, 0x53, 0xe1, 0x4d, 0x87, 0x88, 0x30, + 0x6d, 0x8e, 0xb7, 0x49, 0x42, 0x91, 0x0f, 0x9e, 0x09, 0x6f, 0xcb, 0x39, 0x34, 0x38, 0xb7, 0x34, + 0x7a, 0x19, 0x86, 0xe4, 0xe7, 0x6b, 0x41, 0x59, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, + 0x84, 0x93, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x35, 0x11, 0xa9, 0x80, 0x3f, 0x1f, 0xfe, + 0x88, 0x7c, 0xab, 0xb8, 0x90, 0x45, 0x74, 0xb0, 0x5f, 0x3e, 0x2b, 0x7a, 0x2d, 0x13, 0x8f, 0xb3, + 0x79, 0xa3, 0x15, 0x98, 0xd8, 0x22, 0x4e, 0x23, 0xda, 0x9a, 
0xdb, 0x22, 0xb5, 0x6d, 0xb9, 0xe0, + 0x58, 0x98, 0x17, 0xed, 0xe9, 0xc8, 0xb5, 0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x82, 0xc9, 0x66, + 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x9c, 0x90, 0x66, 0xea, 0xf5, 0x80, 0x84, 0xfc, + 0x75, 0x29, 0x3b, 0x7a, 0x65, 0x20, 0x9d, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xee, 0xc1, 0xc9, + 0xc4, 0x44, 0x10, 0x11, 0x31, 0x46, 0xf2, 0x53, 0xc4, 0x54, 0xb3, 0x0a, 0x88, 0xe0, 0x32, 0x59, + 0x28, 0x9c, 0x5d, 0x05, 0x7a, 0x05, 0xc0, 0x6d, 0x2e, 0x3a, 0x3b, 0x6e, 0x83, 0x5e, 0x15, 0x27, + 0xd8, 0x1c, 0xa1, 0xd7, 0x06, 0x58, 0xaa, 0x48, 0x28, 0xdd, 0x9b, 0xc5, 0xbf, 0x3d, 0xac, 0x51, + 0xa3, 0x65, 0x18, 0x11, 0xff, 0xf6, 0xc4, 0x90, 0xf2, 0xc0, 0x2c, 0x8f, 0xb3, 0xa8, 0x5a, 0x15, + 0x1d, 0x73, 0x90, 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x09, 0xe7, 0x64, 0xa2, 0x3f, 0x7d, 0x7e, 0xca, + 0x31, 0x08, 0x59, 0x5e, 0x96, 0x01, 0xfe, 0x2a, 0x65, 0xa6, 0x1d, 0x21, 0x6e, 0xcf, 0x87, 0x9e, + 0xeb, 0xfa, 0x34, 0xe7, 0x6f, 0x8e, 0x4f, 0xc6, 0x11, 0x07, 0x97, 0x93, 0x48, 0x9c, 0xa6, 0x47, + 0x3e, 0x9c, 0x74, 0xbd, 0xac, 0x59, 0x7d, 0x8a, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 0xdd, 0x7e, 0x46, + 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, 0xe6, 0xf7, 0xf7, 0xfb, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8, + 0xd3, 0x30, 0xa4, 0x7f, 0x94, 0x90, 0x34, 0x2e, 0x66, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b, + 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, 0xa2, 0x5a, 0x46, 0x6c, 0x83, 0xcb, 0xdd, 0x49, 0x32, 0xdd, + 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, 0x68, 0x19, 0x06, 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x52, 0xa5, + 0x5d, 0xf4, 0xc6, 0x39, 0x41, 0x23, 0xd6, 0x8f, 0x48, 0xb1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x7f, + 0xb3, 0x00, 0xe5, 0x0e, 0xf9, 0x7a, 0x12, 0x66, 0x28, 0xab, 0x2b, 0x33, 0xd4, 0x0c, 0x8c, 0xc6, + 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x65, 0xa2, 0x71, 0x92, 0xbe, 0xeb, 0x47, 0x09, 0xba, + 0x25, 0xab, 0xa7, 0xe3, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, 0xdb, 0xfd, 0xb5, 0x37, 0xd7, 0x1a, 0x69, + 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xfb, 0x76, 0xdc, 0xcd, 0x74, 0xc7, 0x1d, 0x81, 0x2d, + 0xd7, 0xbe, 0x01, 0x7d, 0x3c, 0x1c, 0x65, 0x17, 0xe2, 0xf6, 0x63, 0x66, 0x94, 0x6c, 0x25, 0xe1, + 0x19, 0x91, 0xb2, 0xbf, 0xdf, 0x82, 0xd1, 0xc4, 0xeb, 0x36, 0x84, 0xb5, 0x27, 0xd0, 0x0f, 0x22, + 0x12, 0x67, 0x09, 0xdb, 0x17, 0xa0, 0x67, 0xcb, 0x0f, 0xa3, 0xa4, 0xa3, 0xc7, 0x35, 0x3f, 0x8c, + 0x30, 0xc3, 0xd8, 0x7f, 0x68, 0x41, 0xef, 0x9a, 0xe3, 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, + 0x81, 0x6e, 0xbe, 0x0b, 0xbd, 0x04, 0x7d, 0x64, 0x63, 0x83, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0x14, + 0x42, 0xdf, 0x02, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x43, 0x29, + 0x72, 0x77, 0xc8, 0x4c, 0xbd, 0x2e, 0x4c, 0xe5, 0x0f, 0x10, 0xbf, 0x63, 0x4d, 0x32, 0xc0, 0x31, + 0x2f, 0xfb, 0x8b, 0x05, 0x80, 0x38, 0x8e, 0x57, 0xa7, 0x4f, 0x9c, 0x4d, 0x19, 0x51, 0x2f, 0x66, + 0x18, 0x51, 0x51, 0xcc, 0x30, 0xc3, 0x82, 0xaa, 0xba, 0xa9, 0xd8, 0x55, 0x37, 0xf5, 0x1c, 0xa6, + 0x9b, 0xe6, 0x60, 0x3c, 0x8e, 0x43, 0x66, 0x86, 0x61, 0x64, 0x47, 0xe7, 0x5a, 0x12, 0x89, 0xd3, + 0xf4, 0x36, 0x81, 0x0b, 0x2a, 0x1c, 0x93, 0x38, 0xd1, 0x98, 0x1f, 0xb8, 0x6e, 0x94, 0xee, 0xd0, + 0x4f, 0xb1, 0x95, 0xb8, 0x90, 0x6b, 0x25, 0xfe, 0x09, 0x0b, 0x4e, 0x24, 0xeb, 0x61, 0x8f, 0xa6, + 0xbf, 0x60, 0xc1, 0x49, 0x66, 0x2b, 0x67, 0xb5, 0xa6, 0x2d, 0xf3, 0x2f, 0xb6, 0x0d, 0x31, 0x95, + 0xd3, 0xe2, 0x38, 0xe6, 0xc6, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, 0x7b, 0x60, 0x32, + 0x2f, 0x36, 0x15, 0x7b, 0x26, 0xe2, 0xdc, 0xad, 0x6e, 0x93, 0x3b, 0xc2, 0x19, 0x3f, 0x7e, 0x26, + 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0xe9, 0x4f, 0x0a, 0x5d, 0xa6, 0x3f, 0xd9, 0x82, 0xf1, 
0x3b, 0x5b, + 0xc4, 0xbb, 0xe9, 0x85, 0x4e, 0xe4, 0x86, 0x1b, 0x2e, 0xb3, 0x2b, 0xf3, 0x79, 0x23, 0x73, 0x50, + 0x8f, 0xdf, 0x4e, 0x12, 0x1c, 0xec, 0x97, 0xcf, 0x19, 0x80, 0xb8, 0xc9, 0x7c, 0x23, 0xc1, 0x69, + 0xa6, 0xe9, 0xec, 0x31, 0x3d, 0x0f, 0x39, 0x7b, 0xcc, 0x8e, 0x2b, 0xbc, 0x51, 0xe4, 0x1b, 0x00, + 0x76, 0x63, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x02, 0xd2, 0x33, 0x74, 0x19, 0xa1, 0x41, + 0x9f, 0xbb, 0xbf, 0x5f, 0x46, 0xab, 0x29, 0xec, 0xc1, 0x7e, 0x79, 0x82, 0x42, 0x97, 0x3c, 0x7a, + 0xf3, 0x8c, 0xe3, 0xa9, 0x65, 0x30, 0x42, 0xb7, 0x61, 0x8c, 0x42, 0xd9, 0x8a, 0x92, 0x71, 0x47, + 0xf9, 0x6d, 0xf1, 0x99, 0xfb, 0xfb, 0xe5, 0xb1, 0xd5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a, + 0x05, 0x46, 0xe2, 0x79, 0x75, 0x9d, 0xec, 0xf1, 0x00, 0x3d, 0x25, 0xae, 0xf0, 0x5e, 0x31, 0x30, + 0x38, 0x41, 0x69, 0x7f, 0xc1, 0x82, 0x33, 0xb9, 0x19, 0xf1, 0xd1, 0x25, 0x18, 0x70, 0x9a, 0x2e, + 0x37, 0x5f, 0x88, 0xa3, 0x86, 0xa9, 0xc9, 0x2a, 0x4b, 0xdc, 0x78, 0xa1, 0xb0, 0x74, 0x87, 0xdf, + 0x76, 0xbd, 0x7a, 0x72, 0x87, 0xbf, 0xee, 0x7a, 0x75, 0xcc, 0x30, 0xea, 0xc8, 0x2a, 0xe6, 0x3e, + 0x45, 0xf8, 0x0a, 0x5d, 0xab, 0x19, 0xb9, 0xf3, 0x8f, 0xb7, 0x19, 0xe8, 0x19, 0xdd, 0xd4, 0x28, + 0xbc, 0x0a, 0x73, 0xcd, 0x8c, 0xdf, 0x67, 0x81, 0x78, 0xba, 0xdc, 0xc5, 0x99, 0xfc, 0x26, 0x0c, + 0xed, 0xa6, 0xb3, 0x17, 0x5e, 0xc8, 0x7f, 0xcb, 0x2d, 0x22, 0xae, 0x2b, 0x41, 0xdb, 0xc8, 0x54, + 0x68, 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xf3, 0x84, 0x19, 0x14, 0x3a, 0xb7, 0xe6, 0x79, 0x80, 0x3a, + 0xa3, 0x65, 0x29, 0x8d, 0x0b, 0xa6, 0xc4, 0x35, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x79, 0x01, + 0x06, 0x65, 0xb6, 0xbc, 0x96, 0xd7, 0x8d, 0xda, 0xef, 0x50, 0xe9, 0xb3, 0xd1, 0x65, 0x28, 0x31, + 0xbd, 0x74, 0x25, 0xd6, 0x96, 0x2a, 0xad, 0xd0, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xee, 0x8e, 0x61, + 0x6b, 0x9d, 0x91, 0x27, 0x1e, 0xda, 0x56, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x8c, 0xf1, 0x72, + 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x2d, 0xab, 0x57, 0x45, 0x2f, 0x19, 0x5b, 0x49, 0xe0, 0x0e, 0xf6, + 0xcb, 0x27, 0x92, 0x30, 0x66, 0xa4, 0x4d, 0x71, 0x61, 0x2e, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7, + 0x3c, 0xdd, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x03, 0x4a, 0xe7, 0x0d, 0x44, 0xaf, 0x73, 0x97, + 0x67, 0x37, 0x20, 0xf5, 0x76, 0x46, 0x5b, 0x3d, 0x46, 0x87, 0x7c, 0x23, 0xc7, 0x4b, 0x61, 0x55, + 0xde, 0xfe, 0x3f, 0x8b, 0x30, 0x96, 0x8c, 0x0a, 0x80, 0xae, 0x41, 0x1f, 0x17, 0x29, 0x05, 0xfb, + 0x36, 0x3e, 0x41, 0x5a, 0x2c, 0x01, 0x76, 0xb8, 0x0a, 0xa9, 0x54, 0x94, 0x47, 0x6f, 0xc1, 0x60, + 0xdd, 0xbf, 0xe3, 0xdd, 0x71, 0x82, 0xfa, 0x4c, 0x65, 0x49, 0x4c, 0xe7, 0x4c, 0x45, 0xc5, 0x7c, + 0x4c, 0xa6, 0xc7, 0x27, 0x60, 0xf6, 0xef, 0x18, 0x85, 0x75, 0x76, 0x68, 0x8d, 0x25, 0xfa, 0xd8, + 0x70, 0x37, 0x57, 0x9c, 0x66, 0xbb, 0xf7, 0x2f, 0x73, 0x92, 0x48, 0xe3, 0x3c, 0x2c, 0xb2, 0x81, + 0x70, 0x04, 0x8e, 0x19, 0xa1, 0xcf, 0xc2, 0x44, 0x98, 0x63, 0x3a, 0xc9, 0x4b, 0x23, 0xdb, 0xce, + 0x9a, 0x30, 0x7b, 0xfa, 0xfe, 0x7e, 0x79, 0x22, 0xcb, 0xc8, 0x92, 0x55, 0x8d, 0xfd, 0xa5, 0x13, + 0x60, 0x2c, 0x62, 0x23, 0xab, 0xb8, 0x75, 0x44, 0x59, 0xc5, 0x31, 0x0c, 0x90, 0x9d, 0x66, 0xb4, + 0x37, 0xef, 0x06, 0x62, 0x4c, 0x32, 0x79, 0x2e, 0x08, 0x9a, 0x34, 0x4f, 0x89, 0xc1, 0x8a, 0x4f, + 0x76, 0xea, 0xf7, 0xe2, 0x37, 0x31, 0xf5, 0x7b, 0xcf, 0x31, 0xa6, 0x7e, 0x5f, 0x85, 0xfe, 0x4d, + 0x37, 0xc2, 0xa4, 0xe9, 0x8b, 0xcb, 0x5c, 0xe6, 0x3c, 0xbc, 0xca, 0x49, 0xd2, 0x49, 0x86, 0x05, + 0x02, 0x4b, 0x26, 0xe8, 0x75, 0xb5, 0x02, 0xfb, 0xf2, 0x15, 0x2e, 0x69, 0xe7, 0x95, 0xcc, 0x35, + 0x28, 0x12, 0xbc, 0xf7, 0x3f, 0x68, 0x82, 0xf7, 0x45, 0x99, 0x96, 0x7d, 0x20, 0xff, 0xb1, 0x1a, + 0xcb, 0xba, 
0xde, 0x21, 0x19, 0xfb, 0x2d, 0x3d, 0x95, 0x7d, 0x29, 0x7f, 0x27, 0x50, 0x59, 0xea, + 0xbb, 0x4c, 0x60, 0xff, 0x7d, 0x16, 0x9c, 0x4c, 0xa6, 0x9a, 0x65, 0x6f, 0x2a, 0x84, 0x9f, 0xc7, + 0x4b, 0xdd, 0xe4, 0xfe, 0x65, 0x05, 0x8c, 0x0a, 0x99, 0x8e, 0x34, 0x93, 0x0c, 0x67, 0x57, 0x47, + 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0x7f, 0x83, 0xc7, 0x72, 0x32, 0xe1, 0xb7, 0xc9, 0x7f, 0xbf, 0x96, + 0x91, 0x75, 0xfd, 0xf1, 0xbc, 0xac, 0xeb, 0x5d, 0xe7, 0x5a, 0x7f, 0x5d, 0xe5, 0xc0, 0x1f, 0xce, + 0x9f, 0x4a, 0x3c, 0xc3, 0x7d, 0xc7, 0xcc, 0xf7, 0xaf, 0xab, 0xcc, 0xf7, 0x6d, 0x22, 0x8b, 0xf3, + 0xbc, 0xf6, 0x1d, 0xf3, 0xdd, 0x6b, 0x39, 0xeb, 0x47, 0x8f, 0x26, 0x67, 0xbd, 0x71, 0xd4, 0xf0, + 0xb4, 0xe9, 0xcf, 0x74, 0x38, 0x6a, 0x0c, 0xbe, 0xed, 0x0f, 0x1b, 0x9e, 0x9f, 0x7f, 0xfc, 0x81, + 0xf2, 0xf3, 0xdf, 0xd2, 0xf3, 0xdd, 0xa3, 0x0e, 0x09, 0xdd, 0x29, 0x51, 0x97, 0x59, 0xee, 0x6f, + 0xe9, 0x07, 0xe0, 0x44, 0x3e, 0x5f, 0x75, 0xce, 0xa5, 0xf9, 0x66, 0x1e, 0x81, 0xa9, 0xec, 0xf9, + 0x27, 0x8e, 0x27, 0x7b, 0xfe, 0xc9, 0x23, 0xcf, 0x9e, 0x7f, 0xea, 0x18, 0xb2, 0xe7, 0x9f, 0x3e, + 0xc6, 0xec, 0xf9, 0xb7, 0x98, 0x73, 0x14, 0x0f, 0x00, 0x25, 0x22, 0xa1, 0x3f, 0x95, 0x13, 0x3f, + 0x2d, 0x1d, 0x25, 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x56, 0xfe, 0xc9, 0x87, 0x90, + 0x95, 0x7f, 0x35, 0xce, 0xca, 0x7f, 0x26, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0xe2, 0xbf, + 0xa5, 0xe7, 0xd0, 0x7f, 0xa4, 0x8d, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x26, 0x73, 0xfe, 0x6b, 0x3c, + 0x73, 0xfe, 0xd9, 0xfc, 0x9d, 0x3c, 0x79, 0xdc, 0x19, 0xf9, 0xf2, 0x69, 0xbb, 0x54, 0xf0, 0x57, + 0x16, 0xf3, 0x3d, 0xa7, 0x5d, 0x2a, 0x7a, 0x6c, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, 0x0f, + 0x14, 0xe0, 0x7c, 0xfb, 0xf5, 0x16, 0x6b, 0xc9, 0x2b, 0xb1, 0x43, 0x40, 0x42, 0x4b, 0xce, 0xef, + 0x6c, 0x31, 0x55, 0xd7, 0xf1, 0x20, 0xaf, 0xc2, 0xb8, 0x7a, 0x87, 0xd3, 0x70, 0x6b, 0x7b, 0xab, + 0xf1, 0x35, 0x59, 0x45, 0x4e, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, 0xb8, + 0x34, 0x2f, 0xee, 0x66, 0x71, 0x94, 0x71, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x59, 0x70, 0x3a, + 0x27, 0xe5, 0x6b, 0xd7, 0xe1, 0x0e, 0x37, 0x60, 0xb4, 0x69, 0x16, 0xed, 0x10, 0xa1, 0xd5, 0x48, + 0x2c, 0xab, 0xda, 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0a, 0x70, 0xae, 0xad, 0x63, 0x29, + 0xc2, 0x70, 0x6a, 0x73, 0x27, 0x74, 0xe6, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x93, + 0xd4, 0x34, 0x3b, 0x07, 0xf3, 0xd0, 0xbc, 0xba, 0x52, 0x9d, 0x49, 0x53, 0xe0, 0x9c, 0x92, 0x68, + 0x11, 0x50, 0x1a, 0x23, 0x46, 0x98, 0x65, 0x0f, 0x48, 0xf3, 0xc3, 0x19, 0x25, 0xd0, 0x07, 0x61, + 0x58, 0x39, 0xac, 0x6a, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, 0xb0, 0x49, 0x87, 0xae, 0xf0, 0xf4, + 0x13, 0x22, 0x51, 0x89, 0x30, 0x8a, 0x8c, 0xca, 0xdc, 0x12, 0x02, 0x8c, 0x75, 0x9a, 0xd9, 0x97, + 0x7f, 0xeb, 0x1b, 0xe7, 0xdf, 0xf7, 0xbb, 0xdf, 0x38, 0xff, 0xbe, 0x3f, 0xf8, 0xc6, 0xf9, 0xf7, + 0x7d, 0xd7, 0xfd, 0xf3, 0xd6, 0x6f, 0xdd, 0x3f, 0x6f, 0xfd, 0xee, 0xfd, 0xf3, 0xd6, 0x1f, 0xdc, + 0x3f, 0x6f, 0xfd, 0xf1, 0xfd, 0xf3, 0xd6, 0x17, 0xff, 0xe4, 0xfc, 0xfb, 0xde, 0x44, 0x71, 0x00, + 0xd1, 0xcb, 0x74, 0x74, 0x2e, 0xef, 0x5e, 0xf9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x0b, + 0x0a, 0x3d, 0x91, 0x13, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -7459,6 +7728,18 @@ func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.NodeExpandSecretRef != nil { + { + size, err := m.NodeExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.ControllerExpandSecretRef != nil { { size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) @@ -7919,6 +8200,43 @@ func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClaimSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClaimSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClaimSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceClaimTemplateName != nil { + i -= len(*m.ResourceClaimTemplateName) + copy(dAtA[i:], *m.ResourceClaimTemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) + i-- + dAtA[i] = 0x12 + } + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8502,6 +8820,31 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RestartPolicy != nil { + i -= len(*m.RestartPolicy) + copy(dAtA[i:], *m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -8810,6 +9153,39 @@ func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9019,6 +9395,47 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.AllocatedResources) > 0 { + 
keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } if m.Started != nil { i-- if *m.Started { @@ -9765,6 +10182,31 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if m.RestartPolicy != nil { + i -= len(*m.RestartPolicy) + copy(dAtA[i:], *m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -10899,6 +11341,34 @@ func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13384,12 +13854,29 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l - if m.ResizeStatus != nil { - i -= len(*m.ResizeStatus) - copy(dAtA[i:], *m.ResizeStatus) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResizeStatus))) - i-- - dAtA[i] = 0x32 + if len(m.AllocatedResourceStatuses) > 0 { + keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses)) + for k := range m.AllocatedResourceStatuses { + keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses) + for iNdEx := len(keysForAllocatedResourceStatuses) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResourceStatuses[ResourceName(keysForAllocatedResourceStatuses[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResourceStatuses[iNdEx]) + copy(dAtA[i:], 
keysForAllocatedResourceStatuses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResourceStatuses[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } } if len(m.AllocatedResources) > 0 { keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) @@ -14048,6 +14535,18 @@ func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.LastPhaseTransitionTime != nil { + { + size, err := m.LastPhaseTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -14873,6 +15372,107 @@ func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -15040,6 +15640,50 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResourceClaims) > 0 { + for 
iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + } + if len(m.SchedulingGates) > 0 { + for iNdEx := len(m.SchedulingGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SchedulingGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.HostUsers != nil { + i-- + if *m.HostUsers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } if m.OS != nil { { size, err := m.OS.MarshalToSizedBuffer(dAtA[:i]) @@ -15470,6 +16114,41 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.HostIPs) > 0 { + for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if len(m.ResourceClaimStatuses) > 0 { + for iNdEx := len(m.ResourceClaimStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaimStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + i -= len(m.Resize) + copy(dAtA[i:], m.Resize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize))) + i-- + dAtA[i] = 0x72 if len(m.EphemeralContainerStatuses) > 0 { for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { { @@ -16657,6 +17336,34 @@ func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16974,6 +17681,20 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Claims) > 0 { + for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Requests) > 0 { keysForRequests := make([]string, 0, len(m.Requests)) for k := range m.Requests { @@ -18902,6 +19623,29 @@ func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.NodeTaintsPolicy != nil { + i -= len(*m.NodeTaintsPolicy) + copy(dAtA[i:], *m.NodeTaintsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeTaintsPolicy))) + i-- + dAtA[i] = 0x3a + } + if m.NodeAffinityPolicy != nil { + i -= len(*m.NodeAffinityPolicy) + copy(dAtA[i:], *m.NodeAffinityPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeAffinityPolicy))) + i-- + dAtA[i] = 0x32 + } if m.MinDomains != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.MinDomains)) i-- @@ -18975,6 +19719,53 @@ func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } +func (m *TypedObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypedObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Namespace != nil { + i -= len(*m.Namespace) + copy(dAtA[i:], *m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Volume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -19927,6 +20718,10 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { l = m.ControllerExpandSecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.NodeExpandSecretRef != nil { + l = m.NodeExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -20069,6 +20864,23 @@ func (m *CinderVolumeSource) Size() (n int) { return n } +func (m *ClaimSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceClaimTemplateName != nil { + l = len(*m.ResourceClaimTemplateName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ClientIPConfig) Size() (n int) { if m == nil { return 0 @@ -20360,6 +21172,16 @@ func (m *Container) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RestartPolicy != nil { + l = len(*m.RestartPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -20396,6 +21218,19 @@ func (m *ContainerPort) Size() (n int) { return n } +func (m *ContainerResizePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -20485,6 +21320,19 @@ func (m *ContainerStatus) Size() (n int) { if 
m.Started != nil { n += 2 } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -20827,6 +21675,16 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RestartPolicy != nil { + l = len(*m.RestartPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -21163,6 +22021,17 @@ func (m *HostAlias) Size() (n int) { return n } +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *HostPathVolumeSource) Size() (n int) { if m == nil { return 0 @@ -22102,9 +22971,13 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.ResizeStatus != nil { - l = len(*m.ResizeStatus) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AllocatedResourceStatuses) > 0 { + for k, v := range m.AllocatedResourceStatuses { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } return n } @@ -22308,6 +23181,10 @@ func (m *PersistentVolumeStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) + if m.LastPhaseTransitionTime != nil { + l = m.LastPhaseTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22608,6 +23485,45 @@ func (m *PodReadinessGate) Size() (n int) { return n } +func (m *PodResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSchedulingGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodSecurityContext) Size() (n int) { if m == nil { return 0 @@ -22812,6 +23728,21 @@ func (m *PodSpec) Size() (n int) { l = m.OS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.HostUsers != nil { + n += 3 + } + if len(m.SchedulingGates) > 0 { + for _, e := range m.SchedulingGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22869,6 +23800,20 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Resize) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceClaimStatuses) > 0 { + for _, e := range m.ResourceClaimStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostIPs) > 0 { + for _, e := range 
m.HostIPs { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23255,6 +24200,17 @@ func (m *ReplicationControllerStatus) Size() (n int) { return n } +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceFieldSelector) Size() (n int) { if m == nil { return 0 @@ -23381,6 +24337,12 @@ func (m *ResourceRequirements) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if len(m.Claims) > 0 { + for _, e := range m.Claims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -24093,6 +25055,20 @@ func (m *TopologySpreadConstraint) Size() (n int) { if m.MinDomains != nil { n += 1 + sovGenerated(uint64(*m.MinDomains)) } + if m.NodeAffinityPolicy != nil { + l = len(*m.NodeAffinityPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeTaintsPolicy != nil { + l = len(*m.NodeTaintsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -24113,6 +25089,27 @@ func (m *TypedLocalObjectReference) Size() (n int) { return n } +func (m *TypedObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Volume) Size() (n int) { if m == nil { return 0 @@ -24509,6 +25506,7 @@ func (this *CSIPersistentVolumeSource) String() string { `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeExpandSecretRef:` + strings.Replace(this.NodeExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `}`, }, "") return s @@ -24604,6 +25602,17 @@ func (this *CinderVolumeSource) String() string { }, "") return s } +func (this *ClaimSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClaimSource{`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, + `}`, + }, "") + return s +} func (this *ClientIPConfig) String() string { if this == nil { return "nil" @@ -24809,6 +25818,11 @@ func (this *Container) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -24832,6 +25846,8 
@@ func (this *Container) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, + `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, `}`, }, "") return s @@ -24861,6 +25877,17 @@ func (this *ContainerPort) String() string { }, "") return s } +func (this *ContainerResizePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResizePolicy{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -24914,6 +25941,16 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&ContainerStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, @@ -24924,6 +25961,8 @@ func (this *ContainerStatus) String() string { `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Started:` + valueToStringGenerated(this.Started) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -25155,6 +26194,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25178,6 +26222,8 @@ func (this *EphemeralContainerCommon) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, + `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, `}`, }, "") return s @@ -25439,6 +26485,16 @@ func (this *HostAlias) String() string { }, "") return s } +func (this *HostIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostIP{`, + `IP:` + 
fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} func (this *HostPathVolumeSource) String() string { if this == nil { return "nil" @@ -26156,7 +27212,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`, `}`, }, "") return s @@ -26190,13 +27246,23 @@ func (this *PersistentVolumeClaimStatus) String() string { mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) } mapStringForAllocatedResources += "}" + keysForAllocatedResourceStatuses := make([]string, 0, len(this.AllocatedResourceStatuses)) + for k := range this.AllocatedResourceStatuses { + keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses) + mapStringForAllocatedResourceStatuses := "map[ResourceName]ClaimResourceStatus{" + for _, k := range keysForAllocatedResourceStatuses { + mapStringForAllocatedResourceStatuses += fmt.Sprintf("%v: %v,", k, this.AllocatedResourceStatuses[ResourceName(k)]) + } + mapStringForAllocatedResourceStatuses += "}" s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Capacity:` + mapStringForCapacity + `,`, `Conditions:` + repeatedStringForConditions + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, - `ResizeStatus:` + valueToStringGenerated(this.ResizeStatus) + `,`, + `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`, `}`, }, "") return s @@ -26306,6 +27372,7 @@ func (this *PersistentVolumeStatus) String() string { `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `LastPhaseTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastPhaseTransitionTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -26544,6 +27611,38 @@ func (this *PodReadinessGate) String() string { }, "") return s } +func (this *PodResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResourceClaim{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ClaimSource", "ClaimSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResourceClaimStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingGate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *PodSecurityContext) String() string { if this == nil { return "nil" @@ -26627,6 +27726,16 @@ func (this 
*PodSpec) String() string { repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," } repeatedStringForEphemeralContainers += "}" + repeatedStringForSchedulingGates := "[]PodSchedulingGate{" + for _, f := range this.SchedulingGates { + repeatedStringForSchedulingGates += strings.Replace(strings.Replace(f.String(), "PodSchedulingGate", "PodSchedulingGate", 1), `&`, ``, 1) + "," + } + repeatedStringForSchedulingGates += "}" + repeatedStringForResourceClaims := "[]PodResourceClaim{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "PodResourceClaim", "PodResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaims += "}" keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) for k := range this.NodeSelector { keysForNodeSelector = append(keysForNodeSelector, k) @@ -26684,6 +27793,9 @@ func (this *PodSpec) String() string { `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, `SetHostnameAsFQDN:` + valueToStringGenerated(this.SetHostnameAsFQDN) + `,`, `OS:` + strings.Replace(this.OS.String(), "PodOS", "PodOS", 1) + `,`, + `HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`, + `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") return s @@ -26717,6 +27829,16 @@ func (this *PodStatus) String() string { repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," } repeatedStringForEphemeralContainerStatuses += "}" + repeatedStringForResourceClaimStatuses := "[]PodResourceClaimStatus{" + for _, f := range this.ResourceClaimStatuses { + repeatedStringForResourceClaimStatuses += strings.Replace(strings.Replace(f.String(), "PodResourceClaimStatus", "PodResourceClaimStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaimStatuses += "}" + repeatedStringForHostIPs := "[]HostIP{" + for _, f := range this.HostIPs { + repeatedStringForHostIPs += strings.Replace(strings.Replace(f.String(), "HostIP", "HostIP", 1), `&`, ``, 1) + "," + } + repeatedStringForHostIPs += "}" s := strings.Join([]string{`&PodStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Conditions:` + repeatedStringForConditions + `,`, @@ -26731,6 +27853,9 @@ func (this *PodStatus) String() string { `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `PodIPs:` + repeatedStringForPodIPs + `,`, `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, + `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`, + `HostIPs:` + repeatedStringForHostIPs + `,`, `}`, }, "") return s @@ -27033,6 +28158,16 @@ func (this *ReplicationControllerStatus) String() string { }, "") return s } +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *ResourceFieldSelector) String() string { if this == nil { return "nil" @@ -27130,6 +28265,11 @@ func (this *ResourceRequirements) String() string { if this == nil { return "nil" } + repeatedStringForClaims := "[]ResourceClaim{" + for _, f := range this.Claims { + repeatedStringForClaims += strings.Replace(strings.Replace(f.String(), 
"ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForClaims += "}" keysForLimits := make([]string, 0, len(this.Limits)) for k := range this.Limits { keysForLimits = append(keysForLimits, string(k)) @@ -27153,6 +28293,7 @@ func (this *ResourceRequirements) String() string { s := strings.Join([]string{`&ResourceRequirements{`, `Limits:` + mapStringForLimits + `,`, `Requests:` + mapStringForRequests + `,`, + `Claims:` + repeatedStringForClaims + `,`, `}`, }, "") return s @@ -27681,6 +28822,9 @@ func (this *TopologySpreadConstraint) String() string { `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `MinDomains:` + valueToStringGenerated(this.MinDomains) + `,`, + `NodeAffinityPolicy:` + valueToStringGenerated(this.NodeAffinityPolicy) + `,`, + `NodeTaintsPolicy:` + valueToStringGenerated(this.NodeTaintsPolicy) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, `}`, }, "") return s @@ -27697,6 +28841,19 @@ func (this *TypedLocalObjectReference) String() string { }, "") return s } +func (this *TypedObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + valueToStringGenerated(this.Namespace) + `,`, + `}`, + }, "") + return s +} func (this *Volume) String() string { if this == nil { return "nil" @@ -29416,145 +30573,181 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - case 4: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeExpandSecretRef == nil { + m.NodeExpandSecretRef = &SecretReference{} + } + if err := m.NodeExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30658,6 +31851,122 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *ClaimSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClaimSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClaimSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
string(dAtA[iNdEx:postIndex]) + m.ResourceClaimTemplateName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -33090,6 +34399,73 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33396,7 +34772,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerState) Unmarshal(dAtA []byte) error { +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33419,17 +34795,17 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33439,69 +34815,29 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + 
intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Running == nil { - m.Running = &ContainerStateRunning{} - } - if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33511,27 +34847,23 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} - } - if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33554,7 +34886,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { +func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33577,15 +34909,15 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33612,103 +34944,18 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) - } - m.ExitCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitCode |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33718,59 +34965,302 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.Running == nil { + m.Running = &ContainerStateRunning{} } - 
if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34284,6 +35774,171 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Started = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36889,14 +38544,50 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36923,13 +38614,44 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartupProbe == nil { - m.StartupProbe = &Probe{} - } - if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40020,6 +41742,88 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } return nil } +func (m *HostIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47965,7 +49769,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.DataSourceRef == nil { - m.DataSourceRef = &TypedLocalObjectReference{} + m.DataSourceRef = &TypedObjectReference{} } if err := m.DataSourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -48377,11 +50181,11 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } m.AllocatedResources[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResizeStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourceStatuses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48391,24 +50195,118 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := PersistentVolumeClaimResizeStatus(dAtA[iNdEx:postIndex]) - m.ResizeStatus = &s + if m.AllocatedResourceStatuses == nil { + m.AllocatedResourceStatuses = make(map[ResourceName]ClaimResourceStatus) + } + var mapkey ResourceName + var mapvalue ClaimResourceStatus + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = ClaimResourceStatus(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue)) iNdEx = postIndex default: iNdEx = preIndex @@ -50178,6 +52076,42 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPhaseTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastPhaseTransitionTime == nil { + m.LastPhaseTransitionTime = &v1.Time{} + } + if err := m.LastPhaseTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52309,88 +54243,367 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) 
|| (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodResourceClaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType == 0 { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - m.Ports = append(m.Ports, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - if packedLen < 0 { - return ErrInvalidLengthGenerated + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Ports) == 0 { - m.Ports = make([]int32, 0, elementCount) - } - for iNdEx < postIndex { - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ports = append(m.Ports, v) + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52412,7 +54625,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { +func (m *PodResourceClaimStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -52435,15 +54648,15 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodResourceClaimStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -52471,7 +54684,40 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -52494,7 +54740,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingGate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -52517,15 +54763,15 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingGate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingGate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -52553,7 +54799,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex]) + m.Name 
= string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -54241,14 +56487,126 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) - if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SetHostnameAsFQDN", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SetHostnameAsFQDN = &b + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OS == nil { + m.OS = &PodOS{} + } + if err := m.OS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 34: + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostUsers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HostUsers = &b + case 38: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchedulingGates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -54275,35 +56633,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) - if err := 
m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SchedulingGates = append(m.SchedulingGates, PodSchedulingGate{}) + if err := m.SchedulingGates[len(m.SchedulingGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 35: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SetHostnameAsFQDN", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.SetHostnameAsFQDN = &b - case 36: + case 39: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -54330,10 +56667,8 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.OS == nil { - m.OS = &PodOS{} - } - if err := m.OS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceClaims = append(m.ResourceClaims, PodResourceClaim{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -54817,6 +57152,106 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{}) + if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIPs = append(m.HostIPs, HostIP{}) + if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -58315,6 +60750,88 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -59570,6 +62087,40 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Claims = append(m.Claims, ResourceClaim{}) + if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -63838,7 +66389,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -63945,7 +66496,7 @@ func (m *ServiceSpec) Unmarshal(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := IPFamilyPolicyType(dAtA[iNdEx:postIndex]) + s := IPFamilyPolicy(dAtA[iNdEx:postIndex]) m.IPFamilyPolicy = &s iNdEx = postIndex case 18: @@ -64096,7 +66647,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex default: @@ -65702,6 +68253,104 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { } } m.MinDomains = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinityPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := NodeInclusionPolicy(dAtA[iNdEx:postIndex]) + m.NodeAffinityPolicy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeTaintsPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := NodeInclusionPolicy(dAtA[iNdEx:postIndex]) + m.NodeTaintsPolicy = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -65870,6 +68519,186 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *TypedObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypedObjectReference: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypedObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Volume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 41423f45f..901e83731 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ 
b/vendor/k8s.io/api/core/v1/generated.proto @@ -220,11 +220,20 @@ message CSIPersistentVolumeSource { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference controllerExpandSecretRef = 9; + + // nodeExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodeExpandVolume call. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. + // This field is optional, may be omitted if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret + // +optional + optional SecretReference nodeExpandSecretRef = 10; } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -391,6 +400,30 @@ message CinderVolumeSource { optional LocalObjectReference secretRef = 4; } +// ClaimSource describes a reference to a ResourceClaim. +// +// Exactly one of these fields should be set. Consumers of this type must +// treat an empty object as if it has an unknown value. +message ClaimSource { + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + optional string resourceClaimName = 1; + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + optional string resourceClaimTemplateName = 2; +} + // ClientIPConfig represents the configurations of Client IP based session affinity. message ClientIPConfig { // timeoutSeconds specifies the seconds of ClientIP type session sticky time. @@ -647,12 +680,12 @@ message Container { // +optional optional string workingDir = 5; - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here + // List of ports to expose from the container. Not specifying a port here // DOES NOT prevent that port from being exposed. Any port which is // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. + // Modifying this array with strategic merge patch may corrupt the data. + // For more information See https://github.com/kubernetes/kubernetes/issues/108255. // Cannot be updated. // +optional // +patchMergeKey=containerPort @@ -684,6 +717,31 @@ message Container { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. 
+ // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + + // RestartPolicy defines the restart behavior of individual containers in a pod. + // This field may only be set for init containers, and the only allowed value is "Always". + // For non-init containers or when this field is not specified, + // the restart behavior is defined by the Pod's restart policy and the container type. + // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // this init container will be continually restarted on + // exit until all regular containers have terminated. Once all regular + // containers have completed, all init containers with restartPolicy "Always" + // will be shut down. This lifecycle differs from normal init containers and + // is often referred to as a "sidecar" container. Although this init + // container still starts in the init container sequence, it does not wait + // for the container to complete before proceeding to the next init + // container. Instead, the next init container starts immediately after this + // init container is started, or after any startupProbe has successfully + // completed. + // +featureGate=SidecarContainers + // +optional + optional string restartPolicy = 24; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -785,7 +843,7 @@ message Container { // Describe a container image message ContainerImage { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional repeated string names = 1; @@ -824,6 +882,17 @@ message ContainerPort { optional string hostIP = 5; } +// ContainerResizePolicy represents resource resize policy for the container. +message ContainerResizePolicy { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + optional string resourceName = 1; + + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + optional string restartPolicy = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -891,41 +960,76 @@ message ContainerStateWaiting { // ContainerStatus contains details for the current status of this container. message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. optional string name = 1; - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional optional ContainerState state = 2; - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional optional ContainerState lastState = 3; - // Specifies whether the container has passed its readiness probe. 
+ // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. optional bool ready = 4; - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. optional int32 restartCount = 5; - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. optional string imageID = 7; - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional optional string containerID = 8; - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional optional bool started = 9; + + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + map allocatedResources = 10; + + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + optional ResourceRequirements resources = 11; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1001,7 +1105,7 @@ message EmptyDirVolumeSource { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. 
- // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } @@ -1010,11 +1114,8 @@ message EmptyDirVolumeSource { // +structType=atomic message EndpointAddress { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -1050,10 +1151,19 @@ message EndpointPort { optional string protocol = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -1062,13 +1172,16 @@ message EndpointPort { // EndpointSubset is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. // For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } +// +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// // The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. @@ -1087,17 +1200,18 @@ message EndpointSubset { } // Endpoints is a collection of endpoints that implement the actual service. 
Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] +// +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -1192,8 +1306,6 @@ message EnvVarSource { // // To add an ephemeral container, use the ephemeralcontainers subresource of an existing // Pod. Ephemeral containers may not be removed or restarted. -// -// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. message EphemeralContainer { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -1283,6 +1395,20 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + + // Restart policy for the container to manage the restart behavior of each + // container within a pod. + // This may only be set for init containers. You cannot set this field on + // ephemeral containers. + // +featureGate=SidecarContainers + // +optional + optional string restartPolicy = 24; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -1750,7 +1876,8 @@ message HTTPGetAction { // HTTPHeader describes a custom header to be used in HTTP probes message HTTPHeader { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. optional string name = 1; // The header field value @@ -1767,6 +1894,12 @@ message HostAlias { repeated string hostnames = 2; } +// HostIP represents a single IP address allocated to the host. +message HostIP { + // IP is the IP address assigned to the host + optional string ip = 1; +} + // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. message HostPathVolumeSource { @@ -2402,7 +2535,7 @@ message NodeSpec { // +optional repeated Taint taints = 5; - // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26. + // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. // +optional optional NodeConfigSource configSource = 6; @@ -2442,7 +2575,11 @@ message NodeStatus { // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. 
Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. + // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2535,6 +2672,7 @@ message ObjectFieldSelector { // and the version of the actual struct is irrelevant. // 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type // will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -2623,7 +2761,7 @@ message PersistentVolumeClaim { optional PersistentVolumeClaimStatus status = 3; } -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { optional string type = 1; @@ -2699,30 +2837,37 @@ message PersistentVolumeClaimSpec { // * An existing PVC (PersistentVolumeClaim) // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the AnyVolumeDataSource feature gate is enabled, this field will always have - // the same contents as the DataSourceRef field. + // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + // If the namespace is specified, then dataSourceRef will not be copied to dataSource. // +optional optional TypedLocalObjectReference dataSource = 7; // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - // volume is desired. This may be any local object from a non-empty API group (non + // volume is desired. This may be any object from a non-empty API group (non // core object) or a PersistentVolumeClaim object. // When this field is specified, volume binding will only succeed if the type of // the specified object matches some installed volume populator or dynamic // provisioner. - // This field will replace the functionality of the DataSource field and as such + // This field will replace the functionality of the dataSource field and as such // if both fields are non-empty, they must have the same value. For backwards - // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // compatibility, when namespace isn't specified in dataSourceRef, + // both fields (dataSource and dataSourceRef) will be set to the same // value automatically if one of them is empty and the other is non-empty. 
- // There are two important differences between DataSource and DataSourceRef: - // * While DataSource only allows two specific types of objects, DataSourceRef + // When namespace is specified in dataSourceRef, + // dataSource isn't set to the same value and must be empty. + // There are three important differences between dataSource and dataSourceRef: + // * While dataSource only allows two specific types of objects, dataSourceRef // allows any non-core object, as well as PersistentVolumeClaim objects. - // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // * While dataSource ignores disallowed values (dropping them), dataSourceRef // preserves all values, and generates an error if a disallowed value is // specified. + // * While dataSource only allows local objects, dataSourceRef allows objects + // in any namespaces. // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional - optional TypedLocalObjectReference dataSourceRef = 8; + optional TypedObjectReference dataSourceRef = 8; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -2747,25 +2892,71 @@ message PersistentVolumeClaimStatus { // +patchStrategy=merge repeated PersistentVolumeClaimCondition conditions = 4; - // allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may - // be larger than the actual capacity when a volume expansion operation is requested. + // allocatedResources tracks the resources allocated to a PVC including its capacity. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // Capacity reported here may be larger than the actual capacity when a volume expansion operation + // is requested. // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. // If a volume expansion capacity request is lowered, allocatedResources is only // lowered if there are no expansion operations in progress and if the actual volume capacity // is equal or lower than the requested capacity. + // + // A controller that receives PVC update with previously unknown resourceName + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional map allocatedResources = 5; - // resizeStatus stores status of resize operation. - // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty - // string by resize controller or kubelet. + // allocatedResourceStatuses stores status of resource being resized for the given PVC. + // Key names follow standard Kubernetes label syntax. 
Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // ClaimResourceStatus can be in any of following states: + // - ControllerResizeInProgress: + // State set when resize controller starts resizing the volume in control-plane. + // - ControllerResizeFailed: + // State set when resize has failed in resize controller with a terminal error. + // - NodeResizePending: + // State set when resize controller has finished resizing the volume but further resizing of + // volume is needed on the node. + // - NodeResizeInProgress: + // State set when kubelet starts resizing the volume. + // - NodeResizeFailed: + // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set + // NodeResizeFailed. + // For example: if expanding a PVC for more capacity - this field can be one of the following states: + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" + // When this field is not set, it means that no resize operation is in progress for the given PVC. + // + // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure + // +mapType=granular // +optional - optional string resizeStatus = 6; + map allocatedResourceStatuses = 7; } // PersistentVolumeClaimTemplate is used to produce @@ -2939,6 +3130,7 @@ message PersistentVolumeSpec { // claim.VolumeName is the authoritative bind between PV and PVC. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional + // +structType=granular optional ObjectReference claimRef = 4; // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. @@ -2986,6 +3178,13 @@ message PersistentVolumeStatus { // for machine parsing and tidy display in the CLI. // +optional optional string reason = 3; + + // lastPhaseTransitionTime is the time the phase transitioned from one to another + // and automatically resets to current time everytime a volume phase transitions. + // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // +featureGate=PersistentVolumeLastPhaseTransitionTime + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; } // Represents a Photon Controller persistent disk resource. @@ -3230,11 +3429,9 @@ message PodExecOptions { repeated string command = 6; } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// IP: An IP address allocated to the pod. 
Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod optional string ip = 1; } @@ -3339,6 +3536,43 @@ message PodReadinessGate { optional string conditionType = 1; } +// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +// Containers that need access to the ResourceClaim reference it with this name. +message PodResourceClaim { + // Name uniquely identifies this resource claim inside the pod. + // This must be a DNS_LABEL. + optional string name = 1; + + // Source describes where to find the ResourceClaim. + optional ClaimSource source = 2; +} + +// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim +// which references a ResourceClaimTemplate. It stores the generated name for +// the corresponding ResourceClaim. +message PodResourceClaimStatus { + // Name uniquely identifies this resource claim inside the pod. + // This must match the name of an entry in pod.spec.resourceClaims, + // which implies that the string must be a DNS_LABEL. + optional string name = 1; + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. It this is + // unset, then generating a ResourceClaim was not necessary. The + // pod.spec.resourceClaims entry can be ignored in this case. + // + // +optional + optional string resourceClaimName = 2; +} + +// PodSchedulingGate is associated to a Pod to guard its scheduling. +message PodSchedulingGate { + // Name of the scheduling gate. + // Each scheduling gate must have a unique name field. + optional string name = 1; +} + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3387,8 +3621,11 @@ message PodSecurityContext { optional bool runAsNonRoot = 3; // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. + // to the container's primary GID, the fsGroup (if specified), and group memberships + // defined in the container image for the uid of the container process. If unspecified, + // no additional groups are added to any container. Note that group memberships + // defined in the container image for the uid of the container process are still effective, + // even if they are not included in this list. // Note that this field cannot be set when spec.os.name is windows. // +optional repeated int64 supplementalGroups = 4; @@ -3474,14 +3711,13 @@ message PodSpec { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. 
+ // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3700,6 +3936,7 @@ message PodSpec { // If the OS field is set to windows, following fields must be unset: // - spec.hostPID // - spec.hostIPC + // - spec.hostUsers // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -3719,8 +3956,54 @@ message PodSpec { // - spec.containers[*].securityContext.runAsUser // - spec.containers[*].securityContext.runAsGroup // +optional - // This is a beta field and requires the IdentifyPodOS feature optional PodOS os = 36; + + // Use the host's user namespace. + // Optional: Default to true. + // If set to true or not present, the pod will be run in the host user namespace, useful + // for when the pod needs a feature only available to the host user namespace, such as + // loading a kernel module with CAP_SYS_MODULE. + // When set to false, a new userns is created for the pod. Setting false is useful for + // mitigating container breakout vulnerabilities even allowing users to run their + // containers as root without actually having root privileges on the host. + // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + // +k8s:conversion-gen=false + // +optional + optional bool hostUsers = 37; + + // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional + repeated PodSchedulingGate schedulingGates = 38; + + // ResourceClaims defines which ResourceClaims must be allocated + // and reserved before the Pod is allowed to start. The resources + // will be made available to those containers which consume them + // by name. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. + // + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated PodResourceClaim resourceClaims = 39; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3774,11 +4057,23 @@ message PodStatus { // +optional optional string nominatedNodeName = 11; - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional optional string hostIP = 5; - // IP address allocated to the pod. Routable at least within the cluster. + // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must + // match the hostIP field. 
This list is empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will + // not be updated even if there is a node is assigned to this pod. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + // +listType=atomic + repeated HostIP hostIPs = 16; + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. // +optional optional string podIP = 6; @@ -3809,14 +4104,29 @@ message PodStatus { // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + optional string resize = 14; + + // Status of resource claims. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated PodResourceClaimStatus resourceClaimStatuses = 15; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -4009,8 +4319,6 @@ message ProbeHandler { optional TCPSocketAction tcpSocket = 3; // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional optional GRPCAction grpc = 4; } @@ -4260,6 +4568,7 @@ message ReplicationControllerSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; @@ -4268,7 +4577,7 @@ message ReplicationControllerSpec { // ReplicationControllerStatus represents the current status of a replication // controller. message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -4295,6 +4604,14 @@ message ReplicationControllerStatus { repeated ReplicationControllerCondition conditions = 6; } +// ResourceClaim references one entry in PodSpec.ResourceClaims. +message ResourceClaim { + // Name must match the name of one entry in pod.spec.resourceClaims of + // the Pod where this field is used. It makes that resource available + // inside a container. 
+ optional string name = 1; +} + // ResourceFieldSelector represents container resources (cpu, memory) and their output format // +structType=atomic message ResourceFieldSelector { @@ -4380,10 +4697,24 @@ message ResourceRequirements { // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional map requests = 2; + + // Claims lists the names of resources, defined in spec.resourceClaims, + // that are used by this container. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. It can only be set for containers. + // + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated ResourceClaim claims = 3; } // SELinuxOptions are the labels to be applied to the container @@ -4540,7 +4871,7 @@ message SeccompProfile { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. // Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional optional string localhostProfile = 2; } @@ -4911,10 +5242,19 @@ message ServicePort { optional string protocol = 2; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 6; @@ -5062,10 +5402,9 @@ message ServiceSpec { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. 
+ // Users are encouraged to use implementation-specific annotations when available. // +optional optional string loadBalancerIP = 8; @@ -5083,12 +5422,19 @@ message ServiceSpec { // +optional optional string externalName = 10; - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. + // externalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, + // ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + // the service in a way that assumes that external load balancers will take care + // of balancing the service traffic between nodes, and so each node will deliver + // traffic only to the node-local endpoints of the service, without masquerading + // the client source IP. (Traffic mistakenly sent to a node with no endpoints will + // be dropped.) The default value, "Cluster", uses the standard behavior of + // routing to all endpoints evenly (possibly modified by topology and other + // features). Note that traffic sent to an External IP or LoadBalancer IP from + // within the cluster will always get "Cluster" semantics, but clients sending to + // a NodePort from within the cluster may need to take traffic policy into account + // when picking a node. // +optional optional string externalTrafficPolicy = 11; @@ -5101,6 +5447,7 @@ message ServiceSpec { // service or not. If this field is specified when creating a Service // which does not need it, creation will fail. This field will be wiped // when updating a Service to no longer need it (e.g. changing type). + // This field cannot be updated once set. // +optional optional int32 healthCheckNodePort = 12; @@ -5170,17 +5517,15 @@ message ServiceSpec { // implementation (e.g. cloud providers) should ignore Services that set this field. // This field can only be set when creating or updating a Service to type 'LoadBalancer'. // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - // +featureGate=LoadBalancerClass // +optional optional string loadBalancerClass = 21; - // InternalTrafficPolicy specifies if the cluster internal traffic - // should be routed to all endpoints or node-local endpoints only. - // "Cluster" routes internal traffic to a Service to all endpoints. - // "Local" routes traffic to node-local endpoints only, traffic is - // dropped if no node-local endpoints are ready. - // The default value is "Cluster". - // +featureGate=ServiceInternalTrafficPolicy + // InternalTrafficPolicy describes how nodes distribute service traffic they + // receive on the ClusterIP. If set to "Local", the proxy will assume that pods + // only want to talk to endpoints of the service on the same node as the pod, + // dropping the traffic if there are no local endpoints. The default value, + // "Cluster", uses the standard behavior of routing to all endpoints evenly + // (possibly modified by topology and other features). 
// +optional optional string internalTrafficPolicy = 22; } @@ -5399,7 +5744,8 @@ message TopologySpreadConstraint { // We consider each as a "bucket", and try to put balanced number // of pods into each bucket. // We define a domain as a particular instance of a topology. - // Also, we define an eligible domain as a domain whose nodes match the node selector. + // Also, we define an eligible domain as a domain whose nodes meet the requirements of + // nodeAffinityPolicy and nodeTaintsPolicy. // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. // It's a required field. @@ -5457,9 +5803,44 @@ message TopologySpreadConstraint { // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, // it will violate MaxSkew. // - // This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). // +optional optional int32 minDomains = 5; + + // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + // when calculating pod topology spread skew. Options are: + // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + // + // If this value is nil, the behavior is equivalent to the Honor policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + optional string nodeAffinityPolicy = 6; + + // NodeTaintsPolicy indicates how we will treat node taints when calculating + // pod topology spread skew. Options are: + // - Honor: nodes without taints, along with tainted nodes for which the incoming pod + // has a toleration, are included. + // - Ignore: node taints are ignored. All nodes are included. + // + // If this value is nil, the behavior is equivalent to the Ignore policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + optional string nodeTaintsPolicy = 7; + + // MatchLabelKeys is a set of pod label keys to select the pods over which + // spreading will be calculated. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are ANDed with labelSelector + // to select the group of existing pods over which spreading will be calculated + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will + // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + // +listType=atomic + // +optional + repeated string matchLabelKeys = 8; } // TypedLocalObjectReference contains enough information to let you locate the @@ -5479,6 +5860,27 @@ message TypedLocalObjectReference { optional string name = 3; } +message TypedObjectReference { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. 
+ // +optional + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; + + // Namespace is the namespace of resource being referenced + // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + // +featureGate=CrossNamespaceVolumeDataSource + // +optional + optional string namespace = 4; +} + // Volume represents a named volume in a pod that may be accessed by any container in the pod. message Volume { // name of the volume. @@ -5777,12 +6179,9 @@ message WindowsSecurityContextOptions { optional string runAsUserName = 3; // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional optional bool hostProcess = 4; } diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go index b203d335b..e803d518b 100644 --- a/vendor/k8s.io/api/core/v1/toleration.go +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -28,10 +28,11 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, +// +// 1. Empty toleration.effect means to match all taint effects, // otherwise taint effect must equal to toleration.effect. -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. // If toleration.key is empty, toleration.operator must be 'Exists'; // this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index c2a3c67b2..9e05c2235 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -337,6 +337,7 @@ type PersistentVolumeSpec struct { // claim.VolumeName is the authoritative bind between PV and PVC. 
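
The reworked ToleratesTaint comment above enumerates the matching rules; a short sketch of them in use, assuming the vendored corev1 package (the taint key chosen here is just an example):

package example

import corev1 "k8s.io/api/core/v1"

// toleratesNotReady shows rule (2): operator "Exists" matches any taint value
// for the key, and the effects match exactly.
func toleratesNotReady() bool {
    toleration := corev1.Toleration{
        Key:      "node.kubernetes.io/not-ready",
        Operator: corev1.TolerationOpExists, // matches all values for this key
        Effect:   corev1.TaintEffectNoExecute,
    }
    taint := corev1.Taint{
        Key:    "node.kubernetes.io/not-ready",
        Effect: corev1.TaintEffectNoExecute,
    }
    return toleration.ToleratesTaint(&taint) // true
}
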
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional + // +structType=granular ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. // Valid options are Retain (default for manually created PersistentVolumes), Delete (default @@ -410,6 +411,12 @@ type PersistentVolumeStatus struct { // for machine parsing and tidy display in the CLI. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // lastPhaseTransitionTime is the time the phase transitioned from one to another + // and automatically resets to current time everytime a volume phase transitions. + // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // +featureGate=PersistentVolumeLastPhaseTransitionTime + // +optional + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -496,29 +503,54 @@ type PersistentVolumeClaimSpec struct { // * An existing PVC (PersistentVolumeClaim) // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the AnyVolumeDataSource feature gate is enabled, this field will always have - // the same contents as the DataSourceRef field. + // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + // If the namespace is specified, then dataSourceRef will not be copied to dataSource. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - // volume is desired. This may be any local object from a non-empty API group (non + // volume is desired. This may be any object from a non-empty API group (non // core object) or a PersistentVolumeClaim object. // When this field is specified, volume binding will only succeed if the type of // the specified object matches some installed volume populator or dynamic // provisioner. - // This field will replace the functionality of the DataSource field and as such + // This field will replace the functionality of the dataSource field and as such // if both fields are non-empty, they must have the same value. For backwards - // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // compatibility, when namespace isn't specified in dataSourceRef, + // both fields (dataSource and dataSourceRef) will be set to the same // value automatically if one of them is empty and the other is non-empty. - // There are two important differences between DataSource and DataSourceRef: - // * While DataSource only allows two specific types of objects, DataSourceRef + // When namespace is specified in dataSourceRef, + // dataSource isn't set to the same value and must be empty. + // There are three important differences between dataSource and dataSourceRef: + // * While dataSource only allows two specific types of objects, dataSourceRef // allows any non-core object, as well as PersistentVolumeClaim objects. 
- // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // * While dataSource ignores disallowed values (dropping them), dataSourceRef // preserves all values, and generates an error if a disallowed value is // specified. + // * While dataSource only allows local objects, dataSourceRef allows objects + // in any namespaces. // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + // +optional + DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` +} + +type TypedObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace is the namespace of resource being referenced + // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + // +featureGate=CrossNamespaceVolumeDataSource // +optional - DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` + Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -532,26 +564,30 @@ const ( ) // +enum -type PersistentVolumeClaimResizeStatus string +// When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource +// that it does not recognizes, then it should ignore that update and let other controllers +// handle it. +type ClaimResourceStatus string const ( - // When expansion is complete, the empty string is set by resize controller or kubelet. - PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = "" - // State set when resize controller starts expanding the volume in control-plane - PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress" - // State set when expansion has failed in resize controller with a terminal error. - // Transient errors such as timeout should not set this status and should leave ResizeStatus + // State set when resize controller starts resizing the volume in control-plane. + PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress" + + // State set when resize has failed in resize controller with a terminal error. + // Transient errors such as timeout should not set this status and should leave allocatedResourceStatus // unmodified, so as resize controller can resume the volume expansion. 
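
A sketch of a PVC spec that uses the new dataSourceRef/TypedObjectReference shape introduced above, assuming the corev1 and apimachinery resource packages vendored in this patch; the snapshot group, object names and size are placeholders, and the cross-namespace field is only shown as a comment because it additionally needs the CrossNamespaceVolumeDataSource gate and a ReferenceGrant:

package example

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// claimFromSnapshot populates a volume from a snapshot via dataSourceRef.
func claimFromSnapshot() corev1.PersistentVolumeClaimSpec {
    snapshotGroup := "snapshot.storage.k8s.io"
    return corev1.PersistentVolumeClaimSpec{
        AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
        Resources: corev1.ResourceRequirements{
            Requests: corev1.ResourceList{
                corev1.ResourceStorage: resource.MustParse("10Gi"),
            },
        },
        DataSourceRef: &corev1.TypedObjectReference{
            APIGroup: &snapshotGroup,
            Kind:     "VolumeSnapshot",
            Name:     "example-snapshot",
            // Namespace: &otherNamespace, // cross-namespace source (alpha)
        },
    }
}
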
- PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed" - // State set when resize controller has finished expanding the volume but further expansion is needed on the node. - PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending" - // State set when kubelet starts expanding the volume. - PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress" - // State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed. - PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" + PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed" + + // State set when resize controller has finished resizing the volume but further resizing of volume + // is needed on the node. + PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending" + // State set when kubelet starts resizing the volume. + PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress" + // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed + PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" ) -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` @@ -589,24 +625,74 @@ type PersistentVolumeClaimStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` - // allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may - // be larger than the actual capacity when a volume expansion operation is requested. + // allocatedResources tracks the resources allocated to a PVC including its capacity. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // Capacity reported here may be larger than the actual capacity when a volume expansion operation + // is requested. // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. // If a volume expansion capacity request is lowered, allocatedResources is only // lowered if there are no expansion operations in progress and if the actual volume capacity // is equal or lower than the requested capacity. + // + // A controller that receives PVC update with previously unknown resourceName + // should ignore the update for the purpose it was designed. 
For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` - // resizeStatus stores status of resize operation. - // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty - // string by resize controller or kubelet. + + // resizestatus is tombstoned since the field was replaced by allocatedResourceStatus + // ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` + + // allocatedResourceStatuses stores status of resource being resized for the given PVC. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // ClaimResourceStatus can be in any of following states: + // - ControllerResizeInProgress: + // State set when resize controller starts resizing the volume in control-plane. + // - ControllerResizeFailed: + // State set when resize has failed in resize controller with a terminal error. + // - NodeResizePending: + // State set when resize controller has finished resizing the volume but further resizing of + // volume is needed on the node. + // - NodeResizeInProgress: + // State set when kubelet starts resizing the volume. + // - NodeResizeFailed: + // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set + // NodeResizeFailed. + // For example: if expanding a PVC for more capacity - this field can be one of the following states: + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" + // When this field is not set, it means that no resize operation is in progress for the given PVC. + // + // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. 
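
To make the allocatedResourceStatuses semantics above concrete, a small read-only sketch against the vendored corev1 types; it only inspects the built-in "storage" key and, as the comment recommends, ignores resource names it does not recognize (the function name is illustrative):

package example

import corev1 "k8s.io/api/core/v1"

// storageResizeState reports the per-resource resize state for "storage",
// if any resize is currently being tracked on the claim.
func storageResizeState(pvc *corev1.PersistentVolumeClaim) (corev1.ClaimResourceStatus, bool) {
    status, ok := pvc.Status.AllocatedResourceStatuses[corev1.ResourceStorage]
    if !ok {
        return "", false // no resize in progress for storage
    }
    switch status {
    case corev1.PersistentVolumeClaimControllerResizeInProgress,
        corev1.PersistentVolumeClaimNodeResizePending,
        corev1.PersistentVolumeClaimNodeResizeInProgress:
        // resize still being worked on by the controller or kubelet
    case corev1.PersistentVolumeClaimControllerResizeFailed,
        corev1.PersistentVolumeClaimNodeResizeFailed:
        // terminal error reported by the resize controller or kubelet
    }
    return status, true
}
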
// +featureGate=RecoverVolumeExpansionFailure + // +mapType=granular // +optional - ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` + AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` } // +enum @@ -709,7 +795,7 @@ type EmptyDirVolumeSource struct { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } @@ -1662,7 +1748,7 @@ type ServiceAccountTokenProjection struct { // must identify itself with an identifier specified in the audience of the // token, and otherwise should reject the token. The audience defaults to the // identifier of the apiserver. - //+optional + // +optional Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"` // expirationSeconds is the requested duration of validity of the service // account token. As the token approaches expiration, the kubelet volume @@ -1670,7 +1756,7 @@ type ServiceAccountTokenProjection struct { // start trying to rotate the token if the token is older than 80 percent of // its time to live or if the token is older than 24 hours.Defaults to 1 hour // and must be at least 10 minutes. - //+optional + // +optional ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` // path is the path relative to the mount point of the file to project the // token into. @@ -1800,11 +1886,20 @@ type CSIPersistentVolumeSource struct { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` + + // nodeExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodeExpandVolume call. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. + // This field is optional, may be omitted if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret + // +optional + NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -2102,7 +2197,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. 
+ // This will be canonicalized upon output, so case-variant names will be understood as the same header. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value Value string `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -2229,6 +2325,33 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// ResourceResizeRestartPolicy specifies how to handle container resource resize. +type ResourceResizeRestartPolicy string + +// These are the valid resource resize restart policy values: +const ( + // 'NotRequired' means Kubernetes will try to resize the container + // without restarting it, if possible. Kubernetes may however choose to + // restart the container if it is unable to actuate resize without a + // restart. For e.g. the runtime doesn't support restart-free resizing. + NotRequired ResourceResizeRestartPolicy = "NotRequired" + // 'RestartContainer' means Kubernetes will resize the container in-place + // by stopping and starting the container when new resources are applied. + // This is needed for legacy applications. For e.g. java apps using the + // -xmxN flag which are unable to use resized memory without restarting. + RestartContainer ResourceResizeRestartPolicy = "RestartContainer" +) + +// ContainerResizePolicy represents resource resize policy for the container. +type ContainerResizePolicy struct { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"` + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"` +} + // PreemptionPolicy describes a policy for if/when to preempt a pod. // +enum type PreemptionPolicy string @@ -2275,10 +2398,31 @@ type ResourceRequirements struct { Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` + // Claims lists the names of resources, defined in spec.resourceClaims, + // that are used by this container. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. It can only be set for containers. + // + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` +} + +// ResourceClaim references one entry in PodSpec.ResourceClaims. +type ResourceClaim struct { + // Name must match the name of one entry in pod.spec.resourceClaims of + // the Pod where this field is used. It makes that resource available + // inside a container. 
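
A sketch combining the pieces just introduced: requests that stay within limits, a claims entry whose name must match a pod-level resourceClaims entry, and a ContainerResizePolicy using the NotRequired restart policy. It assumes the vendored corev1 and resource packages; the "gpu" claim name and quantities are illustrative:

package example

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// requirementsWithClaim builds a ResourceRequirements as described above.
func requirementsWithClaim() corev1.ResourceRequirements {
    return corev1.ResourceRequirements{
        Limits: corev1.ResourceList{
            corev1.ResourceCPU:    resource.MustParse("1"),
            corev1.ResourceMemory: resource.MustParse("512Mi"),
        },
        Requests: corev1.ResourceList{
            corev1.ResourceCPU:    resource.MustParse("500m"),
            corev1.ResourceMemory: resource.MustParse("256Mi"),
        },
        // Requires the alpha DynamicResourceAllocation feature gate.
        Claims: []corev1.ResourceClaim{{Name: "gpu"}},
    }
}

// cpuResizeWithoutRestart asks for in-place CPU resizes when possible.
var cpuResizeWithoutRestart = corev1.ContainerResizePolicy{
    ResourceName:  corev1.ResourceCPU,
    RestartPolicy: corev1.NotRequired,
}
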
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` } const ( @@ -2324,12 +2468,12 @@ type Container struct { // Cannot be updated. // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here + // List of ports to expose from the container. Not specifying a port here // DOES NOT prevent that port from being exposed. Any port which is // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. + // Modifying this array with strategic merge patch may corrupt the data. + // For more information See https://github.com/kubernetes/kubernetes/issues/108255. // Cannot be updated. // +optional // +patchMergeKey=containerPort @@ -2357,6 +2501,29 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` + // RestartPolicy defines the restart behavior of individual containers in a pod. + // This field may only be set for init containers, and the only allowed value is "Always". + // For non-init containers or when this field is not specified, + // the restart behavior is defined by the Pod's restart policy and the container type. + // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // this init container will be continually restarted on + // exit until all regular containers have terminated. Once all regular + // containers have completed, all init containers with restartPolicy "Always" + // will be shut down. This lifecycle differs from normal init containers and + // is often referred to as a "sidecar" container. Although this init + // container still starts in the init container sequence, it does not wait + // for the container to complete before proceeding to the next init + // container. Instead, the next init container starts immediately after this + // init container is started, or after any startupProbe has successfully + // completed. + // +featureGate=SidecarContainers + // +optional + RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -2461,8 +2628,6 @@ type ProbeHandler struct { TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2576,33 +2741,66 @@ type ContainerState struct { // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. 
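
The new container-level restartPolicy described above enables the "sidecar" init container pattern; a minimal sketch using the vendored corev1 types (container names and images are placeholders, and the field requires the SidecarContainers feature gate):

package example

import corev1 "k8s.io/api/core/v1"

// podWithSidecar declares an init container that keeps running alongside the
// regular containers because its restartPolicy is "Always".
func podWithSidecar() corev1.PodSpec {
    always := corev1.ContainerRestartPolicyAlways
    return corev1.PodSpec{
        InitContainers: []corev1.Container{{
            Name:          "log-forwarder",
            Image:         "example.com/log-forwarder:latest",
            RestartPolicy: &always, // only valid on init containers
        }},
        Containers: []corev1.Container{{
            Name:  "app",
            Image: "example.com/app:latest",
        }},
    }
}
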
+ // Each container in a pod must have a unique name across all container types. // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. 
// +optional Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2644,6 +2842,9 @@ const ( PodReady PodConditionType = "Ready" // PodScheduled represents status of the scheduling process for this pod. PodScheduled PodConditionType = "PodScheduled" + // DisruptionTarget indicates the pod is about to be terminated due to a + // disruption (such as preemption, eviction API or garbage-collection). + DisruptionTarget PodConditionType = "DisruptionTarget" ) // These are reasons for a pod's transition to a condition. @@ -2651,6 +2852,22 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + + // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler + // skips scheduling the pod because one or more scheduling gates are still present. + PodReasonSchedulingGated = "SchedulingGated" + + // PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens + // during scheduling, for example due to nodeAffinity parsing errors. + PodReasonSchedulerError = "SchedulerError" + + // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // is initiated by kubelet + PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -2676,6 +2893,20 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +// PodResizeStatus shows status of desired resize of a pod's containers. +type PodResizeStatus string + +const ( + // Pod resources resize has been requested and will be evaluated by node. + PodResizeStatusProposed PodResizeStatus = "Proposed" + // Pod resources resize has been accepted by node and is being actuated. + PodResizeStatusInProgress PodResizeStatus = "InProgress" + // Node cannot resize the pod at this time and will keep retrying. + PodResizeStatusDeferred PodResizeStatus = "Deferred" + // Requested pod resize is not feasible and will not be re-evaluated. + PodResizeStatusInfeasible PodResizeStatus = "Infeasible" +) + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. 
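
A few read-only helpers sketching how the status additions above might be consumed: the nil-means-false rule for Started, the new DisruptionTarget condition, and the pod-level resize status. They assume only the vendored corev1 package; the function names are illustrative:

package example

import corev1 "k8s.io/api/core/v1"

// allContainersStarted treats a nil Started pointer as false, per the comment.
func allContainersStarted(pod *corev1.Pod) bool {
    for _, cs := range pod.Status.ContainerStatuses {
        if cs.Started == nil || !*cs.Started || !cs.Ready {
            return false
        }
    }
    return true
}

// aboutToBeDisrupted checks for the DisruptionTarget condition.
func aboutToBeDisrupted(pod *corev1.Pod) bool {
    for _, cond := range pod.Status.Conditions {
        if cond.Type == corev1.DisruptionTarget && cond.Status == corev1.ConditionTrue {
            return true // e.g. Reason "PreemptionByScheduler" or "TerminationByKubelet"
        }
    }
    return false
}

// resizeInProgress reads the pod-level resize status.
func resizeInProgress(pod *corev1.Pod) bool {
    return pod.Status.Resize == corev1.PodResizeStatusInProgress
}
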
// If none of the following policies is specified, the default one @@ -2689,6 +2920,14 @@ const ( RestartPolicyNever RestartPolicy = "Never" ) +// ContainerRestartPolicy is the restart policy for a single container. +// This may only be set for init containers and only allowed value is "Always". +type ContainerRestartPolicy string + +const ( + ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" +) + // DNSPolicy defines how a pod's DNS will be configured. // +enum type DNSPolicy string @@ -2725,7 +2964,7 @@ const ( // by the node selector terms. // +structType=atomic type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. + // Required. A list of node selector terms. The terms are ORed. NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` } @@ -3081,13 +3320,12 @@ type PodSpec struct { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3277,6 +3515,7 @@ type PodSpec struct { // If the OS field is set to windows, following fields must be unset: // - spec.hostPID // - spec.hostIPC + // - spec.hostUsers // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -3296,8 +3535,107 @@ type PodSpec struct { // - spec.containers[*].securityContext.runAsUser // - spec.containers[*].securityContext.runAsGroup // +optional - // This is a beta field and requires the IdentifyPodOS feature OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"` + + // Use the host's user namespace. + // Optional: Default to true. + // If set to true or not present, the pod will be run in the host user namespace, useful + // for when the pod needs a feature only available to the host user namespace, such as + // loading a kernel module with CAP_SYS_MODULE. + // When set to false, a new userns is created for the pod. Setting false is useful for + // mitigating container breakout vulnerabilities even allowing users to run their + // containers as root without actually having root privileges on the host. + // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + // +k8s:conversion-gen=false + // +optional + HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` + + // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. 
+ // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional + SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` + // ResourceClaims defines which ResourceClaims must be allocated + // and reserved before the Pod is allowed to start. The resources + // will be made available to those containers which consume them + // by name. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. + // + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` +} + +// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +// Containers that need access to the ResourceClaim reference it with this name. +type PodResourceClaim struct { + // Name uniquely identifies this resource claim inside the pod. + // This must be a DNS_LABEL. + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Source describes where to find the ResourceClaim. + Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` +} + +// ClaimSource describes a reference to a ResourceClaim. +// +// Exactly one of these fields should be set. Consumers of this type must +// treat an empty object as if it has an unknown value. +type ClaimSource struct { + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"` + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"` +} + +// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim +// which references a ResourceClaimTemplate. It stores the generated name for +// the corresponding ResourceClaim. +type PodResourceClaimStatus struct { + // Name uniquely identifies this resource claim inside the pod. + // This must match the name of an entry in pod.spec.resourceClaims, + // which implies that the string must be a DNS_LABEL. 
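
A sketch tying together the two pod-level lists introduced above: a scheduling gate that must be removed before the scheduler will consider the pod, and a resourceClaims entry (backed by a ResourceClaimTemplate via ClaimSource) that a container references by name in resources.claims. It assumes the vendored corev1 package; the gate name, template name, image and "gpu" claim name are placeholders:

package example

import corev1 "k8s.io/api/core/v1"

// gatedPodWithClaim builds a PodSpec using schedulingGates and resourceClaims.
func gatedPodWithClaim() corev1.PodSpec {
    template := "example-gpu-template"
    return corev1.PodSpec{
        SchedulingGates: []corev1.PodSchedulingGate{
            {Name: "example.com/quota-check"},
        },
        ResourceClaims: []corev1.PodResourceClaim{{
            Name: "gpu",
            Source: corev1.ClaimSource{
                ResourceClaimTemplateName: &template,
            },
        }},
        Containers: []corev1.Container{{
            Name:  "app",
            Image: "example.com/app:latest",
            Resources: corev1.ResourceRequirements{
                // Must match the pod-level resourceClaims entry by name.
                Claims: []corev1.ResourceClaim{{Name: "gpu"}},
            },
        }},
    }
}
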
+ Name string `json:"name" protobuf:"bytes,1,name=name"` + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. It this is + // unset, then generating a ResourceClaim was not necessary. The + // pod.spec.resourceClaims entry can be ignored in this case. + // + // +optional + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"` } // OSName is the set of OS'es that can be used in OS. @@ -3318,6 +3656,13 @@ type PodOS struct { Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"` } +// PodSchedulingGate is associated to a Pod to guard its scheduling. +type PodSchedulingGate struct { + // Name of the scheduling gate. + // Each scheduling gate must have a unique name field. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // +enum type UnsatisfiableConstraintAction string @@ -3330,6 +3675,17 @@ const ( ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" ) +// NodeInclusionPolicy defines the type of node inclusion policy +// +enum +type NodeInclusionPolicy string + +const ( + // NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew. + NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore" + // NodeInclusionPolicyHonor means use this scheduling directive when calculating pod topology spread skew. + NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor" +) + // TopologySpreadConstraint specifies how to spread matching pods among the given topology. type TopologySpreadConstraint struct { // MaxSkew describes the degree to which pods may be unevenly distributed. @@ -3358,7 +3714,8 @@ type TopologySpreadConstraint struct { // We consider each as a "bucket", and try to put balanced number // of pods into each bucket. // We define a domain as a particular instance of a topology. - // Also, we define an eligible domain as a domain whose nodes match the node selector. + // Also, we define an eligible domain as a domain whose nodes meet the requirements of + // nodeAffinityPolicy and nodeTaintsPolicy. // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. // It's a required field. @@ -3413,9 +3770,41 @@ type TopologySpreadConstraint struct { // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, // it will violate MaxSkew. // - // This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). // +optional MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"` + // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + // when calculating pod topology spread skew. Options are: + // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + // + // If this value is nil, the behavior is equivalent to the Honor policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. 
+ // +optional + NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"` + // NodeTaintsPolicy indicates how we will treat node taints when calculating + // pod topology spread skew. Options are: + // - Honor: nodes without taints, along with tainted nodes for which the incoming pod + // has a toleration, are included. + // - Ignore: node taints are ignored. All nodes are included. + // + // If this value is nil, the behavior is equivalent to the Ignore policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"` + // MatchLabelKeys is a set of pod label keys to select the pods over which + // spreading will be calculated. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are ANDed with labelSelector + // to select the group of existing pods over which spreading will be calculated + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will + // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"` } const ( @@ -3492,8 +3881,11 @@ type PodSecurityContext struct { // +optional RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. + // to the container's primary GID, the fsGroup (if specified), and group memberships + // defined in the container image for the uid of the container process. If unspecified, + // no additional groups are added to any container. Note that group memberships + // defined in the container image for the uid of the container process are still effective, + // even if they are not included in this list. // Note that this field cannot be set when spec.os.name is windows. // +optional SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` @@ -3544,7 +3936,7 @@ type SeccompProfile struct { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. // Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"` } @@ -3605,11 +3997,15 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. 
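
A sketch of a TopologySpreadConstraint that exercises the newer knobs documented above (minDomains, the two node inclusion policies, matchLabelKeys), assuming the vendored corev1 and metav1 packages; the label selector, keys and function name are illustrative:

package example

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneSpread spreads matching pods across at least three zones.
func zoneSpread() corev1.TopologySpreadConstraint {
    minDomains := int32(3)
    honor := corev1.NodeInclusionPolicyHonor
    ignore := corev1.NodeInclusionPolicyIgnore
    return corev1.TopologySpreadConstraint{
        MaxSkew:           1,
        TopologyKey:       "topology.kubernetes.io/zone",
        WhenUnsatisfiable: corev1.DoNotSchedule,
        LabelSelector: &metav1.LabelSelector{
            MatchLabels: map[string]string{"app": "demo"},
        },
        MinDomains:         &minDomains,
        NodeAffinityPolicy: &honor,  // only nodes matching nodeAffinity/nodeSelector count
        NodeTaintsPolicy:   &ignore, // taints do not shrink the candidate set
        MatchLabelKeys:     []string{"pod-template-hash"},
    }
}
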
type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// HostIP represents a single IP address allocated to the host. +type HostIP struct { + // IP is the IP address assigned to the host IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` } @@ -3676,6 +4072,18 @@ type EphemeralContainerCommon struct { // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` + // Restart policy for the container to manage the restart behavior of each + // container within a pod. + // This may only be set for init containers. You cannot set this field on + // ephemeral containers. + // +featureGate=SidecarContainers + // +optional + RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -3764,8 +4172,6 @@ var _ = Container(EphemeralContainerCommon{}) // // To add an ephemeral container, use the ephemeralcontainers subresource of an existing // Pod. Ephemeral containers may not be removed or restarted. -// -// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. type EphemeralContainer struct { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -3830,10 +4236,23 @@ type PodStatus struct { // +optional NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"` - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. + + // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must + // match the hostIP field. This list is empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will + // not be updated even if there is a node is assigned to this pod. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + // +listType=atomic + HostIPs []HostIP `json:"hostIPs,omitempty" protobuf:"bytes,16,rep,name=hostIPs" patchStrategy:"merge" patchMergeKey:"ip"` + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. 
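
A tiny read-only sketch of the address fields around this hunk, including the new plural hostIPs list whose first entry is expected to match hostIP; it assumes only the vendored corev1 package and the helper name is illustrative:

package example

import corev1 "k8s.io/api/core/v1"

// podAddresses returns the single-value IP fields alongside the hostIPs list.
func podAddresses(pod *corev1.Pod) (podIP, hostIP string, hostIPs []corev1.HostIP) {
    return pod.Status.PodIP, pod.Status.HostIP, pod.Status.HostIPs
}
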
// +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` @@ -3863,13 +4282,28 @@ type PodStatus struct { ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"` + + // Status of resource claims. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4008,6 +4442,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` @@ -4016,7 +4451,7 @@ type ReplicationControllerSpec struct { // ReplicationControllerStatus represents the current status of a replication // controller. type ReplicationControllerStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -4168,29 +4603,47 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes the type of traffic routing for -// internal traffic +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they +// receive on the ClusterIP. +// +enum +type ServiceInternalTrafficPolicy string + +const ( + // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. 
+ ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" + + // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same + // node as the client pod (dropping the traffic if there are no local endpoints). + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they +// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, +// and LoadBalancer IPs. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceInternalTrafficPolicyLocal only routes to node-local - // endpoints, otherwise drops the traffic - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by + // routing only to endpoints on the same node as the traffic was received on + // (dropping the traffic if there are no local endpoints). + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" ) -// Service External Traffic Policy Type string +// for backwards compat // +enum -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) // These are the valid conditions of a service. @@ -4198,6 +4651,9 @@ const ( // LoadBalancerPortsError represents the condition of the requested ports // on the cloud load balancer instance. LoadBalancerPortsError = "LoadBalancerPortsError" + // LoadBalancerPortsErrorReason reason in ServiceStatus condition LoadBalancerPortsError + // means the LoadBalancer was not able to be configured correctly. + LoadBalancerPortsErrorReason = "LoadBalancerMixedProtocolNotSupported" ) // ServiceStatus represents the current status of a service. @@ -4255,30 +4711,34 @@ const ( IPv6Protocol IPFamily = "IPv6" ) -// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service // +enum -type IPFamilyPolicyType string +type IPFamilyPolicy string const ( // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. 
// The IPFamily assigned is based on the default IPFamily used by the cluster // or as identified by service.spec.ipFamilies field - IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + IPFamilyPolicySingleStack IPFamilyPolicy = "SingleStack" // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when // the cluster is configured for dual-stack. If the cluster is not configured // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not // set in service.spec.ipFamilies then the service will be assigned the default IPFamily // configured on the cluster - IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + IPFamilyPolicyPreferDualStack IPFamilyPolicy = "PreferDualStack" // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If // service.spec.ipFamilies was not provided then it will be assigned according to how they are // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative // IPFamily will be added by apiserver - IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" + IPFamilyPolicyRequireDualStack IPFamilyPolicy = "RequireDualStack" ) +// for backwards compat +// +enum +type IPFamilyPolicyType = IPFamilyPolicy + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. @@ -4384,10 +4844,9 @@ type ServiceSpec struct { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` @@ -4405,14 +4864,21 @@ type ServiceSpec struct { // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + // externalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, + // ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + // the service in a way that assumes that external load balancers will take care + // of balancing the service traffic between nodes, and so each node will deliver + // traffic only to the node-local endpoints of the service, without masquerading + // the client source IP. (Traffic mistakenly sent to a node with no endpoints will + // be dropped.) The default value, "Cluster", uses the standard behavior of + // routing to all endpoints evenly (possibly modified by topology and other + // features). Note that traffic sent to an External IP or LoadBalancer IP from + // within the cluster will always get "Cluster" semantics, but clients sending to + // a NodePort from within the cluster may need to take traffic policy into account + // when picking a node. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4423,6 +4889,7 @@ type ServiceSpec struct { // service or not. If this field is specified when creating a Service // which does not need it, creation will fail. This field will be wiped // when updating a Service to no longer need it (e.g. changing type). + // This field cannot be updated once set. // +optional HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` @@ -4442,7 +4909,7 @@ type ServiceSpec struct { SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` // TopologyKeys is tombstoned to show why 16 is reserved protobuf tag. - //TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + // TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` @@ -4476,7 +4943,7 @@ type ServiceSpec struct { // ipFamilies and clusterIPs fields depend on the value of this field. This // field will be wiped when updating a service to type ExternalName. // +optional - IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` + IPFamilyPolicy *IPFamilyPolicy `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicy"` // allocateLoadBalancerNodePorts defines if NodePorts will be automatically // allocated for services with type LoadBalancer. Default is "true". It @@ -4498,19 +4965,17 @@ type ServiceSpec struct { // implementation (e.g. cloud providers) should ignore Services that set this field. // This field can only be set when creating or updating a Service to type 'LoadBalancer'. // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - // +featureGate=LoadBalancerClass // +optional LoadBalancerClass *string `json:"loadBalancerClass,omitempty" protobuf:"bytes,21,opt,name=loadBalancerClass"` - // InternalTrafficPolicy specifies if the cluster internal traffic - // should be routed to all endpoints or node-local endpoints only. - // "Cluster" routes internal traffic to a Service to all endpoints. 
- // "Local" routes traffic to node-local endpoints only, traffic is - // dropped if no node-local endpoints are ready. - // The default value is "Cluster". - // +featureGate=ServiceInternalTrafficPolicy + // InternalTrafficPolicy describes how nodes distribute service traffic they + // receive on the ClusterIP. If set to "Local", the proxy will assume that pods + // only want to talk to endpoints of the service on the same node as the pod, + // dropping the traffic if there are no local endpoints. The default value, + // "Cluster", uses the standard behavior of routing to all endpoints evenly + // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` } // ServicePort contains information on service's port. @@ -4530,10 +4995,19 @@ type ServicePort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` @@ -4669,17 +5143,18 @@ type ServiceAccountList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] +// +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -4701,13 +5176,16 @@ type Endpoints struct { // EndpointSubset is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. 
// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } +// +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// // The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. @@ -4727,11 +5205,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -4764,10 +5239,19 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` @@ -4811,7 +5295,7 @@ type NodeSpec struct { // +optional Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` - // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26. + // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. 
// +optional ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` @@ -4985,7 +5469,11 @@ type NodeStatus struct { // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. + // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -5058,7 +5546,7 @@ type PodSignature struct { // Describe a container image type ContainerImage struct { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. @@ -5580,6 +6068,7 @@ type ServiceProxyOptions struct { // and the version of the actual struct is irrelevant. // 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type // will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -6453,12 +6942,9 @@ type WindowsSecurityContextOptions struct { RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"` } @@ -6539,6 +7025,13 @@ const ( PortForwardRequestIDHeader = "requestID" ) +const ( + // MixedProtocolNotSupported error in PortStatus means that the cloud provider + // can't publish the port on the load balancer because mixed values of protocols + // on the same LoadBalancer type of Service are not supported by the cloud provider. 
+ MixedProtocolNotSupported = "MixedProtocolNotSupported" +) + // PortStatus represents the error condition of a service port type PortStatus struct { diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 53a06b945..9734d8b41 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AWSElasticBlockStoreVolumeSource = map[string]string{ @@ -126,7 +126,8 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -208,6 +209,16 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string { return map_CinderVolumeSource } +var map_ClaimSource = map[string]string{ + "": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. 
Consumers of this type must treat an empty object as if it has an unknown value.", + "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", + "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", +} + +func (ClaimSource) SwaggerDoc() map[string]string { + return map_ClaimSource +} + var map_ClientIPConfig = map[string]string{ "": "ClientIPConfig represents the configurations of Client IP based session affinity.", "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", @@ -331,10 +342,12 @@ var map_Container = map[string]string{ "command": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "resizePolicy": "Resources resize policy for the container.", + "restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -356,7 +369,7 @@ func (Container) SwaggerDoc() map[string]string { var map_ContainerImage = map[string]string{ "": "Describe a container image", - "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "names": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", "sizeBytes": "The size of the image in bytes.", } @@ -377,6 +390,16 @@ func (ContainerPort) SwaggerDoc() map[string]string { return map_ContainerPort } +var map_ContainerResizePolicy = map[string]string{ + "": "ContainerResizePolicy represents resource resize policy for the container.", + "resourceName": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "restartPolicy": "Restart policy to apply when specified resource is resized. 
If not specified, it defaults to NotRequired.", +} + +func (ContainerResizePolicy) SwaggerDoc() map[string]string { + return map_ContainerResizePolicy +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -423,16 +446,18 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format '://'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. 
Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -482,7 +507,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", "medium": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -491,7 +516,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -506,7 +531,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. 
Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -514,7 +539,7 @@ func (EndpointPort) SwaggerDoc() map[string]string { } var map_EndpointSubset = map[string]string{ - "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", "ports": "Port numbers available on the related IP addresses.", @@ -525,7 +550,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { } var map_Endpoints = map[string]string{ - "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "": "Endpoints is a collection of endpoints that implement the actual service. 
Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -579,7 +604,7 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_EphemeralContainer = map[string]string{ - "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.\n\nThis is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.", + "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.", "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", } @@ -598,6 +623,8 @@ var map_EphemeralContainerCommon = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources already allocated to the pod.", + "resizePolicy": "Resources resize policy for the container.", + "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -807,7 +834,7 @@ func (HTTPGetAction) SwaggerDoc() map[string]string { var map_HTTPHeader = map[string]string{ "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", + "name": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "value": "The header field value", } @@ -825,6 +852,15 @@ func (HostAlias) SwaggerDoc() map[string]string { return map_HostAlias } +var map_HostIP = map[string]string{ + "": "HostIP represents a single IP address allocated to the host.", + "ip": "IP is the IP address assigned to the host", +} + +func (HostIP) SwaggerDoc() map[string]string { + return map_HostIP +} + var map_HostPathVolumeSource = map[string]string{ "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", "path": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", @@ -1188,7 +1224,7 @@ var map_NodeSpec = map[string]string{ "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", - "configSource": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.", + "configSource": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.", "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966", } @@ -1202,7 +1238,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. 
See http://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1281,7 +1317,7 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimCondition = map[string]string{ - "": "PersistentVolumeClaimCondition contails details about state of pvc", + "": "PersistentVolumeClaimCondition contains details about state of pvc", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", @@ -1310,8 +1346,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.", - "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.", + "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1319,13 +1355,13 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "capacity represents the actual resources of the underlying volume.", - "conditions": "conditions is the current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - "allocatedResources": "allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "resizeStatus": "resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "capacity represents the actual resources of the underlying volume.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1409,10 +1445,11 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeStatus = map[string]string{ - "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", - "message": "message is a human-readable message indicating details about why the volume is in this state.", - "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "message": "message is a human-readable message indicating details about why the volume is in this state.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time every time a volume phase transitions. 
This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1534,8 +1571,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string { } var map_PodIP = map[string]string{ - "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", - "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", + "": "PodIP represents a single IP address allocated to the pod.", + "ip": "IP is the IP address assigned to the pod", } func (PodIP) SwaggerDoc() map[string]string { @@ -1605,6 +1642,35 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { return map_PodReadinessGate } +var map_PodResourceClaim = map[string]string{ + "": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "source": "Source describes where to find the ResourceClaim.", +} + +func (PodResourceClaim) SwaggerDoc() map[string]string { + return map_PodResourceClaim +} + +var map_PodResourceClaimStatus = map[string]string{ + "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", + "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", +} + +func (PodResourceClaimStatus) SwaggerDoc() map[string]string { + return map_PodResourceClaimStatus +} + +var map_PodSchedulingGate = map[string]string{ + "": "PodSchedulingGate is associated to a Pod to guard its scheduling.", + "name": "Name of the scheduling gate. Each scheduling gate must have a unique name field.", +} + +func (PodSchedulingGate) SwaggerDoc() map[string]string { + return map_PodSchedulingGate +} + var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", @@ -1612,7 +1678,7 @@ var map_PodSecurityContext = map[string]string{ "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", @@ -1637,8 +1703,8 @@ var map_PodSpec = map[string]string{ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", @@ -1669,7 +1735,10 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is a beta field and requires the IdentifyPodOS feature", + "os": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1683,14 +1752,17 @@ var map_PodStatus = map[string]string{ "message": "A human readable message indicating details about why the pod is in this condition.", "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. 
Empty if not yet allocated.", + "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIP will not be updated even if a node is assigned to the pod.", + "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIPs will not be updated even if a node is assigned to this pod.", + "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", + "resourceClaimStatuses": "Status of resource claims.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1808,7 +1880,7 @@ var map_ProbeHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.", + "grpc": "GRPC specifies an action involving a GRPC port.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1921,7 +1993,7 @@ var map_ReplicationControllerSpec = map[string]string{ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -1930,7 +2002,7 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string { var map_ReplicationControllerStatus = map[string]string{ "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", "readyReplicas": "The number of ready replicas for this replication controller.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", @@ -1942,6 +2014,15 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { return map_ReplicationControllerStatus } +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + var map_ResourceFieldSelector = map[string]string{ "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", "containerName": "Container name: required for volumes, optional for env vars", @@ -1998,7 +2079,8 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } func (ResourceRequirements) SwaggerDoc() map[string]string { @@ -2076,7 +2158,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_SeccompProfile = map[string]string{ "": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", "type": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", - "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".", + "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.", } func (SeccompProfile) SwaggerDoc() map[string]string { @@ -2243,7 +2325,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2271,18 +2353,18 @@ var map_ServiceSpec = map[string]string{ "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations, and it cannot support dual-stack. As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. This field may be removed in a future API version.", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", + "externalTrafficPolicy": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) 
The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.", "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. 
This field will be wiped when updating a service to type ExternalName.", "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", - "internalTrafficPolicy": "InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \"Cluster\" routes internal traffic to a Service to all endpoints. \"Local\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \"Cluster\".", + "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2399,12 +2481,15 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { } var map_TopologySpreadConstraint = map[string]string{ - "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", - "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ", - "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. 
Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.", - "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", - "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", - "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. 
- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", + "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", + "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } func (TopologySpreadConstraint) SwaggerDoc() map[string]string { @@ -2422,6 +2507,17 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string { return map_TypedLocalObjectReference } +var map_TypedObjectReference = map[string]string{ + "apiGroup": "APIGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", + "namespace": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", +} + +func (TypedObjectReference) SwaggerDoc() map[string]string { + return map_TypedObjectReference +} + var map_Volume = map[string]string{ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", "name": "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", @@ -2540,7 +2636,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", + "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 5cf82a981..8c3cb87b8 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -19,6 +19,10 @@ package v1 const ( LabelHostname = "kubernetes.io/hostname" + // Label value is the network location of kube-apiserver stored as + // Stored in APIServer Identity lease objects to view what address is used for peer proxy + AnnotationPeerAdvertiseAddress = "kubernetes.io/peer-advertise-address" + LabelTopologyZone = "topology.kubernetes.io/zone" LabelTopologyRegion = "topology.kubernetes.io/region" diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index a3fbd14c6..d76f0bbbc 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -243,6 +243,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.NodeExpandSecretRef != nil { + in, out := &in.NodeExpandSecretRef, &out.NodeExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } @@ -414,6 +419,32 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimSource) DeepCopyInto(out *ClaimSource) { + *out = *in + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + if in.ResourceClaimTemplateName != nil { + in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource. +func (in *ClaimSource) DeepCopy() *ClaimSource { + if in == nil { + return nil + } + out := new(ClaimSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { *out = *in @@ -757,6 +788,16 @@ func (in *Container) DeepCopyInto(out *Container) { } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } + if in.RestartPolicy != nil { + in, out := &in.RestartPolicy, &out.RestartPolicy + *out = new(ContainerRestartPolicy) + **out = **in + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -844,6 +885,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. 
+func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { + if in == nil { + return nil + } + out := new(ContainerResizePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -936,6 +993,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(bool) **out = **in } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1351,6 +1420,16 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } + if in.RestartPolicy != nil { + in, out := &in.RestartPolicy, &out.RestartPolicy + *out = new(ContainerRestartPolicy) + **out = **in + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -1802,6 +1881,22 @@ func (in *HostAlias) DeepCopy() *HostAlias { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostIP) DeepCopyInto(out *HostIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP. +func (in *HostIP) DeepCopy() *HostIP { + if in == nil { + return nil + } + out := new(HostIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { *out = *in @@ -2826,7 +2921,7 @@ func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2958,7 +3053,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec } if in.DataSourceRef != nil { in, out := &in.DataSourceRef, &out.DataSourceRef - *out = new(TypedLocalObjectReference) + *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } return @@ -3003,10 +3098,12 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val.DeepCopy() } } - if in.ResizeStatus != nil { - in, out := &in.ResizeStatus, &out.ResizeStatus - *out = new(PersistentVolumeClaimResizeStatus) - **out = **in + if in.AllocatedResourceStatuses != nil { + in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses + *out = make(map[ResourceName]ClaimResourceStatus, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } return } @@ -3266,6 +3363,10 @@ func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { *out = *in + if in.LastPhaseTransitionTime != nil { + in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime + *out = (*in).DeepCopy() + } return } @@ -3721,6 +3822,60 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim. +func (in *PodResourceClaim) DeepCopy() *PodResourceClaim { + if in == nil { + return nil + } + out := new(PodResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) { + *out = *in + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus. +func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus { + if in == nil { + return nil + } + out := new(PodResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate. +func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate { + if in == nil { + return nil + } + out := new(PodSchedulingGate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = *in @@ -3949,6 +4104,23 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(PodOS) **out = **in } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } + if in.SchedulingGates != nil { + in, out := &in.SchedulingGates, &out.SchedulingGates + *out = make([]PodSchedulingGate, len(*in)) + copy(*out, *in) + } + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]PodResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3972,6 +4144,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HostIPs != nil { + in, out := &in.HostIPs, &out.HostIPs + *out = make([]HostIP, len(*in)) + copy(*out, *in) + } if in.PodIPs != nil { in, out := &in.PodIPs, &out.PodIPs *out = make([]PodIP, len(*in)) @@ -4002,6 +4179,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ResourceClaimStatuses != nil { + in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses + *out = make([]PodResourceClaimStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -4532,6 +4716,22 @@ func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { *out = *in @@ -4712,6 +4912,11 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { (*out)[key] = val.DeepCopy() } } + if in.Claims != nil { + in, out := &in.Claims, &out.Claims + *out = make([]ResourceClaim, len(*in)) + copy(*out, *in) + } return } @@ -5400,7 +5605,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.IPFamilyPolicy != nil { in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy - *out = new(IPFamilyPolicyType) + *out = new(IPFamilyPolicy) **out = **in } if in.AllocateLoadBalancerNodePorts != nil { @@ -5415,7 +5620,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return @@ -5649,6 +5854,21 @@ func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) *out = new(int32) **out = **in } + if in.NodeAffinityPolicy != nil { + in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy + *out = new(NodeInclusionPolicy) + **out = **in + } + if in.NodeTaintsPolicy != nil { + in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy + *out = new(NodeInclusionPolicy) + **out = **in + } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5683,6 +5903,32 @@ func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference. +func (in *TypedObjectReference) DeepCopy() *TypedObjectReference { + if in == nil { + return nil + } + out := new(TypedObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 44d9d19e2..490ce8922 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -66,8 +66,7 @@ message Endpoint { map deprecatedTopology = 5; // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional optional string nodeName = 6; @@ -87,22 +86,22 @@ message EndpointConditions { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. 
+ // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional optional bool ready = 1; // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional optional bool serving = 2; // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional optional bool terminating = 3; } @@ -118,9 +117,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -128,21 +126,30 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. 
// +optional optional string appProtocol = 4; @@ -186,7 +193,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 13591436d..efbb09918 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -29,9 +29,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +42,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +65,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -77,8 +83,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -86,6 +94,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional @@ -101,13 +110,14 @@ type Endpoint struct { DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty" protobuf:"bytes,5,opt,name=deprecatedTopology"` // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. 
// +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -120,22 +130,22 @@ type EndpointConditions struct { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } @@ -157,28 +167,39 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). 
- // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` @@ -189,9 +210,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index bb3c8769f..bef774539 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -34,7 +34,7 @@ var map_Endpoint = map[string]string{ "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", "deprecatedTopology": "deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.", - "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.", "zone": "zone is the name of the Zone this endpoint exists in.", "hints": "hints contains information associated with how an endpoint should be consumed.", } @@ -45,9 +45,9 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. 
In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", - "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", - "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } func (EndpointConditions) SwaggerDoc() map[string]string { @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. 
Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 84bdf075a..8b6c360b0 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -73,8 +73,7 @@ message Endpoint { map topology = 5; // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional optional string nodeName = 6; @@ -98,15 +97,13 @@ message EndpointConditions { // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional optional bool serving = 2; // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional optional bool terminating = 3; } @@ -121,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. 
// Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -131,17 +127,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -189,7 +185,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 8620004f4..f09f7f320 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -80,8 +86,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). 
// Multiple endpoints which use the same hostname should be considered @@ -89,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -108,11 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -133,15 +144,13 @@ type EndpointConditions struct { // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } @@ -162,24 +171,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. 
// If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -198,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index 96e6d13ef..b1d4c306c 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -34,7 +34,7 @@ var map_Endpoint = map[string]string{ "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", - "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. 
This can be used to determine endpoints local to a Node.", "hints": "hints contains information associated with how an endpoint should be consumed.", } @@ -45,8 +45,8 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", - "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", - "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } func (EndpointConditions) SwaggerDoc() map[string]string { @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. 
Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63bb..44ac0c3bb 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a83..e6c28a4f8 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index db6b56bb2..d967e3810 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_cdc93917efc28165, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_cdc93917efc28165, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} 
} func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_cdc93917efc28165, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_cdc93917efc28165, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_cdc93917efc28165, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_cdc93917efc28165, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_cdc93917efc28165, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_cdc93917efc28165, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_cdc93917efc28165, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_cdc93917efc28165, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_cdc93917efc28165, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_cdc93917efc28165, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_cdc93917efc28165, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_cdc93917efc28165, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_cdc93917efc28165, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil 
-} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_cdc93917efc28165, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_cdc93917efc28165, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_cdc93917efc28165, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{24} + return fileDescriptor_cdc93917efc28165, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -749,10 +581,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) 
XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} +func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_cdc93917efc28165, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_cdc93917efc28165, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_cdc93917efc28165, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_cdc93917efc28165, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -864,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_cdc93917efc28165, []int{26} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_cdc93917efc28165, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_cdc93917efc28165, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_cdc93917efc28165, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_cdc93917efc28165, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_cdc93917efc28165, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
@@ -1085,15 +1001,15 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } -func (*NetworkPolicyStatus) ProtoMessage() {} -func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} } -func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1101,27 +1017,27 @@ func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyStatus.Merge(m, src) +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) } -func (m *NetworkPolicyStatus) XXX_Size() int { +func (m *ReplicaSet) XXX_Size() int { return m.Size() } -func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m) +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) } -var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} } -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1129,27 +1045,27 @@ func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) } -func (m *PodSecurityPolicy) XXX_Size() int { +func (m *ReplicaSetCondition) XXX_Size() int { return m.Size() } -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) } -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_cdc93917efc28165, []int{39} +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} } -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1157,134 +1073,22 @@ func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byt } return b[:n], nil } -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) } -func (m *PodSecurityPolicyList) XXX_Size() int { +func (m *ReplicaSetList) XXX_Size() int { return m.Size() } -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) } -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSet.Merge(m, src) -} -func (m *ReplicaSet) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSet) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo - -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetCondition.Merge(m, src) -} -func (m *ReplicaSetCondition) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetList.Merge(m, src) -} -func (m *ReplicaSetList) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetList) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1172,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1200,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1421,122 +1225,10 @@ func (m 
*RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { 
- return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1256,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} + return fileDescriptor_cdc93917efc28165, []int{43} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1284,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} + return fileDescriptor_cdc93917efc28165, []int{44} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1617,38 +1309,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{56} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1663,15 +1324,15 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") 
proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") @@ -1684,10 +1345,6 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1696,15 +1353,10 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), 
"k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -1712,248 +1364,189 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3797 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc9, - 0x75, 0x56, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xaf, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0xbd, - 0x88, 0xa2, 0xdd, 0x68, 0x67, 0x56, 0x5c, 0xad, 0xbc, 0x59, 0x21, 0xf6, 0x72, 0x48, 0x51, 0xa2, - 0xc3, 0x9f, 0xd9, 0x1a, 0x52, 0x36, 0x16, 0x59, 0x67, 0x9b, 0x3d, 0xc5, 0x61, 0x2f, 0x7b, 0xba, - 0xdb, 0x5d, 0x3d, 0x34, 0x27, 0xc8, 0x21, 0x87, 0x5c, 0x0c, 0x04, 0x48, 0x2e, 0x4e, 0x72, 0xcc, - 0x22, 0x40, 0x6e, 0x41, 0x8e, 0xc9, 0xc1, 0x30, 0x12, 0xc4, 0x01, 0x84, 0xc0, 0x09, 0x0c, 0xe4, - 0x10, 0x9f, 0x88, 0x2c, 0x7d, 0x0a, 0x72, 0xca, 0x2d, 0xd0, 0x29, 0xa8, 0xea, 0xea, 0xff, 0x6e, - 0x4e, 0x0f, 0x2d, 0x11, 0xd9, 0xc0, 0x27, 0x69, 0xea, 0xbd, 0xf7, 0xbd, 0x57, 0x55, 0xaf, 0xde, - 0x7b, 0x55, 0xfd, 0x08, 0x1b, 0xc7, 0xef, 0xd3, 0xba, 0x66, 0x36, 0x8e, 0xfb, 0x07, 0xc4, 0x36, - 0x88, 0x43, 0x68, 0xe3, 0x84, 0x18, 0x1d, 0xd3, 0x6e, 0x08, 0x82, 0x62, 0x69, 0x0d, 0x72, 0xea, - 0x10, 0x83, 0x6a, 0xa6, 0x41, 0x1b, 0x27, 0x0f, 0x0e, 0x88, 0xa3, 0x3c, 0x68, 0x74, 0x89, 0x41, - 0x6c, 0xc5, 0x21, 0x9d, 0xba, 0x65, 0x9b, 0x8e, 0x89, 0x6e, 0xbb, 0xec, 0x75, 0xc5, 0xd2, 0xea, - 0x01, 0x7b, 0x5d, 0xb0, 0xdf, 0x7c, 0xbb, 0xab, 0x39, 0x47, 0xfd, 0x83, 0xba, 0x6a, 0xf6, 0x1a, - 0x5d, 0xb3, 0x6b, 0x36, 0xb8, 0xd4, 0x41, 0xff, 0x90, 0xff, 0xe2, 0x3f, 0xf8, 0xff, 0x5c, 0xb4, - 0x9b, 0x72, 0x48, 0xb9, 0x6a, 0xda, 0xa4, 0x71, 0x92, 0xd0, 0x78, 0xf3, 0x61, 0xc0, 0xd3, 0x53, - 0xd4, 0x23, 0xcd, 0x20, 0xf6, 0xa0, 0x61, 0x1d, 0x77, 0xd9, 0x00, 0x6d, 0xf4, 0x88, 0xa3, 0xa4, - 0x49, 0x35, 0xb2, 0xa4, 0xec, 0xbe, 0xe1, 0x68, 0x3d, 0x92, 0x10, 0x78, 0x34, 0x4c, 0x80, 0xaa, - 0x47, 0xa4, 0xa7, 0x24, 0xe4, 0xde, 0xcd, 0x92, 0xeb, 0x3b, 0x9a, 0xde, 0xd0, 0x0c, 0x87, 0x3a, - 0x76, 0x5c, 0x48, 0x7e, 0x08, 0xb3, 0xab, 0xba, 0x6e, 0x7e, 0x9f, 0x74, 0xd6, 0xda, 0x9b, 0xeb, - 0xb6, 0x76, 0x42, 0x6c, 0x74, 0x07, 0x4a, 0x86, 0xd2, 0x23, 0x55, 0xe9, 0x8e, 0x74, 0x6f, 0xa2, - 0x79, 0xfd, 0xc5, 0x59, 0xed, 0xda, 0xf9, 0x59, 0xad, 0xb4, 0xa3, 0xf4, 0x08, 0xe6, 0x14, 0xf9, - 0x31, 0xcc, 0x09, 0xa9, 0x0d, 0x9d, 0x9c, 0x3e, 0x37, 0xf5, 0x7e, 0x8f, 0xa0, 0xbb, 0x30, 0xde, - 0xe1, 0x00, 0x42, 0x70, 0x5a, 0x08, 0x8e, 0xbb, 0xb0, 0x58, 0x50, 0x65, 0x0a, 0x33, 0x42, 0xf8, - 0x99, 0x49, 0x9d, 0x96, 0xe2, 0x1c, 0xa1, 0x15, 0x00, 0x4b, 0x71, 0x8e, 0x5a, 0x36, 0x39, 0xd4, - 0x4e, 0x85, 0x38, 0x12, 0xe2, 0xd0, 0xf2, 0x29, 0x38, 0xc4, 0x85, 0xee, 0x43, 0xc5, 0x26, 0x4a, - 0x67, 0xd7, 0xd0, 0x07, 0xd5, 0xc2, 0x1d, 0xe9, 0x5e, 0xa5, 0x39, 0x2b, 0x24, 0x2a, 0x58, 0x8c, - 0x63, 0x9f, 0x43, 0xfe, 0x61, 0x01, 0x26, 0xd6, 0x15, 0xd2, 0x33, 0x8d, 0x36, 0x71, 0xd0, 0xa7, - 0x50, 0x61, 0xdb, 0xd5, 0x51, 0x1c, 0x85, 0x6b, 0x9b, 0x5c, 0x79, 0xa7, 0x1e, 0xb8, 0x93, 0xbf, - 0x7a, 0x75, 0xeb, 0xb8, 0xcb, 0x06, 0x68, 0x9d, 0x71, 0xd7, 0x4f, 0x1e, 0xd4, 0x77, 0x0f, 0x3e, - 0x23, 0xaa, 0xb3, 0x4d, 0x1c, 0x25, 0xb0, 0x2f, 0x18, 0xc3, 0x3e, 0x2a, 0xda, 0x81, 0x12, 0xb5, - 0x88, 0xca, 0x2d, 0x9b, 0x5c, 0xb9, 0x5f, 0xbf, 0xd0, 0x59, 0xeb, 0xbe, 0x65, 0x6d, 0x8b, 0xa8, - 0xc1, 0x8a, 0xb3, 0x5f, 0x98, 0xe3, 0xa0, 0xe7, 0x30, 0x4e, 0x1d, 0xc5, 0xe9, 
0xd3, 0x6a, 0x91, - 0x23, 0xd6, 0x73, 0x23, 0x72, 0xa9, 0x60, 0x33, 0xdc, 0xdf, 0x58, 0xa0, 0xc9, 0xff, 0x59, 0x00, - 0xe4, 0xf3, 0xae, 0x99, 0x46, 0x47, 0x73, 0x34, 0xd3, 0x40, 0x1f, 0x40, 0xc9, 0x19, 0x58, 0x9e, - 0x0b, 0xdc, 0xf5, 0x0c, 0xda, 0x1b, 0x58, 0xe4, 0xe5, 0x59, 0x6d, 0x31, 0x29, 0xc1, 0x28, 0x98, - 0xcb, 0xa0, 0x2d, 0xdf, 0xd4, 0x02, 0x97, 0x7e, 0x18, 0x55, 0xfd, 0xf2, 0xac, 0x96, 0x72, 0xd8, - 0xea, 0x3e, 0x52, 0xd4, 0x40, 0x74, 0x02, 0x48, 0x57, 0xa8, 0xb3, 0x67, 0x2b, 0x06, 0x75, 0x35, - 0x69, 0x3d, 0x22, 0x16, 0xe1, 0xad, 0x7c, 0x9b, 0xc6, 0x24, 0x9a, 0x37, 0x85, 0x15, 0x68, 0x2b, - 0x81, 0x86, 0x53, 0x34, 0x30, 0x6f, 0xb6, 0x89, 0x42, 0x4d, 0xa3, 0x5a, 0x8a, 0x7a, 0x33, 0xe6, - 0xa3, 0x58, 0x50, 0xd1, 0x9b, 0x50, 0xee, 0x11, 0x4a, 0x95, 0x2e, 0xa9, 0x8e, 0x71, 0xc6, 0x19, - 0xc1, 0x58, 0xde, 0x76, 0x87, 0xb1, 0x47, 0x97, 0x7f, 0x24, 0xc1, 0x94, 0xbf, 0x72, 0x5b, 0x1a, - 0x75, 0xd0, 0xef, 0x24, 0xfc, 0xb0, 0x9e, 0x6f, 0x4a, 0x4c, 0x9a, 0x7b, 0xa1, 0xef, 0xf3, 0xde, - 0x48, 0xc8, 0x07, 0xb7, 0x61, 0x4c, 0x73, 0x48, 0x8f, 0xed, 0x43, 0xf1, 0xde, 0xe4, 0xca, 0xbd, - 0xbc, 0x2e, 0xd3, 0x9c, 0x12, 0xa0, 0x63, 0x9b, 0x4c, 0x1c, 0xbb, 0x28, 0xf2, 0x9f, 0x96, 0x42, - 0xe6, 0x33, 0xd7, 0x44, 0x9f, 0x40, 0x85, 0x12, 0x9d, 0xa8, 0x8e, 0x69, 0x0b, 0xf3, 0xdf, 0xcd, - 0x69, 0xbe, 0x72, 0x40, 0xf4, 0xb6, 0x10, 0x6d, 0x5e, 0x67, 0xf6, 0x7b, 0xbf, 0xb0, 0x0f, 0x89, - 0x3e, 0x82, 0x8a, 0x43, 0x7a, 0x96, 0xae, 0x38, 0x44, 0x9c, 0xa3, 0x37, 0xc2, 0x53, 0x60, 0x9e, - 0xc3, 0xc0, 0x5a, 0x66, 0x67, 0x4f, 0xb0, 0xf1, 0xe3, 0xe3, 0x2f, 0x89, 0x37, 0x8a, 0x7d, 0x18, - 0x74, 0x02, 0xd3, 0x7d, 0xab, 0xc3, 0x38, 0x1d, 0x16, 0x05, 0xbb, 0x03, 0xe1, 0x49, 0x8f, 0xf2, - 0xae, 0xcd, 0x7e, 0x44, 0xba, 0xb9, 0x28, 0x74, 0x4d, 0x47, 0xc7, 0x71, 0x4c, 0x0b, 0x5a, 0x85, - 0x99, 0x9e, 0x66, 0xb0, 0xb8, 0x34, 0x68, 0x13, 0xd5, 0x34, 0x3a, 0x94, 0xbb, 0xd5, 0x58, 0x73, - 0x49, 0x00, 0xcc, 0x6c, 0x47, 0xc9, 0x38, 0xce, 0x8f, 0xbe, 0x05, 0xc8, 0x9b, 0xc6, 0x53, 0x37, - 0x88, 0x6b, 0xa6, 0xc1, 0x7d, 0xae, 0x18, 0x38, 0xf7, 0x5e, 0x82, 0x03, 0xa7, 0x48, 0xa1, 0x2d, - 0x58, 0xb0, 0xc9, 0x89, 0xc6, 0xe6, 0xf8, 0x4c, 0xa3, 0x8e, 0x69, 0x0f, 0xb6, 0xb4, 0x9e, 0xe6, - 0x54, 0xc7, 0xb9, 0x4d, 0xd5, 0xf3, 0xb3, 0xda, 0x02, 0x4e, 0xa1, 0xe3, 0x54, 0x29, 0xf9, 0xcf, - 0xc6, 0x61, 0x26, 0x16, 0x6f, 0xd0, 0x73, 0x58, 0x54, 0xfb, 0xb6, 0x4d, 0x0c, 0x67, 0xa7, 0xdf, - 0x3b, 0x20, 0x76, 0x5b, 0x3d, 0x22, 0x9d, 0xbe, 0x4e, 0x3a, 0xdc, 0x51, 0xc6, 0x9a, 0xcb, 0xc2, - 0xe2, 0xc5, 0xb5, 0x54, 0x2e, 0x9c, 0x21, 0xcd, 0x56, 0xc1, 0xe0, 0x43, 0xdb, 0x1a, 0xa5, 0x3e, - 0x66, 0x81, 0x63, 0xfa, 0xab, 0xb0, 0x93, 0xe0, 0xc0, 0x29, 0x52, 0xcc, 0xc6, 0x0e, 0xa1, 0x9a, - 0x4d, 0x3a, 0x71, 0x1b, 0x8b, 0x51, 0x1b, 0xd7, 0x53, 0xb9, 0x70, 0x86, 0x34, 0x7a, 0x0f, 0x26, - 0x5d, 0x6d, 0x7c, 0xff, 0xc4, 0x46, 0xcf, 0x0b, 0xb0, 0xc9, 0x9d, 0x80, 0x84, 0xc3, 0x7c, 0x6c, - 0x6a, 0xe6, 0x01, 0x25, 0xf6, 0x09, 0xe9, 0x64, 0x6f, 0xf0, 0x6e, 0x82, 0x03, 0xa7, 0x48, 0xb1, - 0xa9, 0xb9, 0x1e, 0x98, 0x98, 0xda, 0x78, 0x74, 0x6a, 0xfb, 0xa9, 0x5c, 0x38, 0x43, 0x9a, 0xf9, - 0xb1, 0x6b, 0xf2, 0xea, 0x89, 0xa2, 0xe9, 0xca, 0x81, 0x4e, 0xaa, 0xe5, 0xa8, 0x1f, 0xef, 0x44, - 0xc9, 0x38, 0xce, 0x8f, 0x9e, 0xc2, 0x9c, 0x3b, 0xb4, 0x6f, 0x28, 0x3e, 0x48, 0x85, 0x83, 0x7c, - 0x45, 0x80, 0xcc, 0xed, 0xc4, 0x19, 0x70, 0x52, 0x06, 0x7d, 0x00, 0xd3, 0xaa, 0xa9, 0xeb, 0xdc, - 0x1f, 0xd7, 0xcc, 0xbe, 0xe1, 0x54, 0x27, 0x38, 0x0a, 0x62, 0xe7, 0x71, 0x2d, 0x42, 0xc1, 0x31, - 0x4e, 0x44, 0x00, 0x54, 0x2f, 0xe1, 0xd0, 0x2a, 0xf0, 0xf8, 0xf8, 0x20, 0x6f, 0x0c, 0xf0, 0x53, - 0x55, 
0x50, 0x03, 0xf8, 0x43, 0x14, 0x87, 0x80, 0xe5, 0x7f, 0x96, 0x60, 0x29, 0x23, 0x74, 0xa0, - 0x6f, 0x46, 0x52, 0xec, 0x6f, 0xc4, 0x52, 0xec, 0xad, 0x0c, 0xb1, 0x50, 0x9e, 0x35, 0x60, 0xca, - 0x66, 0xb3, 0x32, 0xba, 0x2e, 0x8b, 0x88, 0x91, 0xef, 0x0d, 0x99, 0x06, 0x0e, 0xcb, 0x04, 0x31, - 0x7f, 0xee, 0xfc, 0xac, 0x36, 0x15, 0xa1, 0xe1, 0x28, 0xbc, 0xfc, 0xe7, 0x05, 0x80, 0x75, 0x62, - 0xe9, 0xe6, 0xa0, 0x47, 0x8c, 0xab, 0xa8, 0xa1, 0x76, 0x23, 0x35, 0xd4, 0xdb, 0xc3, 0xb6, 0xc7, - 0x37, 0x2d, 0xb3, 0x88, 0xfa, 0x76, 0xac, 0x88, 0x6a, 0xe4, 0x87, 0xbc, 0xb8, 0x8a, 0xfa, 0xf7, - 0x22, 0xcc, 0x07, 0xcc, 0x41, 0x19, 0xf5, 0x38, 0xb2, 0xc7, 0xbf, 0x1e, 0xdb, 0xe3, 0xa5, 0x14, - 0x91, 0xd7, 0x56, 0x47, 0x7d, 0x06, 0xd3, 0xac, 0xca, 0x71, 0xf7, 0x92, 0xd7, 0x50, 0xe3, 0x23, - 0xd7, 0x50, 0x7e, 0xb6, 0xdb, 0x8a, 0x20, 0xe1, 0x18, 0x72, 0x46, 0xcd, 0x56, 0xfe, 0x32, 0xd6, - 0x6c, 0x3f, 0x96, 0x60, 0x3a, 0xd8, 0xa6, 0x2b, 0x28, 0xda, 0x76, 0xa2, 0x45, 0xdb, 0x9b, 0xb9, - 0x5d, 0x34, 0xa3, 0x6a, 0xfb, 0x1f, 0x56, 0xe0, 0xfb, 0x4c, 0xec, 0x80, 0x1f, 0x28, 0xea, 0xf1, - 0xf0, 0x3b, 0x1e, 0xfa, 0xa1, 0x04, 0x48, 0x64, 0x81, 0x55, 0xc3, 0x30, 0x1d, 0xc5, 0x8d, 0x95, - 0xae, 0x59, 0x9b, 0xb9, 0xcd, 0xf2, 0x34, 0xd6, 0xf7, 0x13, 0x58, 0x4f, 0x0c, 0xc7, 0x1e, 0x04, - 0x9b, 0x9c, 0x64, 0xc0, 0x29, 0x06, 0x20, 0x05, 0xc0, 0x16, 0x98, 0x7b, 0xa6, 0x38, 0xc8, 0x6f, - 0xe7, 0x88, 0x79, 0x4c, 0x60, 0xcd, 0x34, 0x0e, 0xb5, 0x6e, 0x10, 0x76, 0xb0, 0x0f, 0x84, 0x43, - 0xa0, 0x37, 0x9f, 0xc0, 0x52, 0x86, 0xb5, 0x68, 0x16, 0x8a, 0xc7, 0x64, 0xe0, 0x2e, 0x1b, 0x66, - 0xff, 0x45, 0x0b, 0x30, 0x76, 0xa2, 0xe8, 0x7d, 0x37, 0xfc, 0x4e, 0x60, 0xf7, 0xc7, 0x07, 0x85, - 0xf7, 0x25, 0xf9, 0x47, 0x63, 0x61, 0xdf, 0xe1, 0x15, 0xf3, 0x3d, 0x76, 0x69, 0xb5, 0x74, 0x4d, - 0x55, 0xa8, 0x28, 0x84, 0xae, 0xbb, 0x17, 0x56, 0x77, 0x0c, 0xfb, 0xd4, 0x48, 0x6d, 0x5d, 0x78, - 0xbd, 0xb5, 0x75, 0xf1, 0xd5, 0xd4, 0xd6, 0xbf, 0x0b, 0x15, 0xea, 0x55, 0xd5, 0x25, 0x0e, 0xf9, - 0x60, 0x84, 0xf8, 0x2a, 0x0a, 0x6a, 0x5f, 0x81, 0x5f, 0x4a, 0xfb, 0xa0, 0x69, 0x45, 0xf4, 0xd8, - 0x88, 0x45, 0xf4, 0x2b, 0x2d, 0x7c, 0x59, 0xbc, 0xb1, 0x94, 0x3e, 0x25, 0x1d, 0x1e, 0xdb, 0x2a, - 0x41, 0xbc, 0x69, 0xf1, 0x51, 0x2c, 0xa8, 0xe8, 0x93, 0x88, 0xcb, 0x56, 0x2e, 0xe3, 0xb2, 0xd3, - 0xd9, 0xee, 0x8a, 0xf6, 0x61, 0xc9, 0xb2, 0xcd, 0xae, 0x4d, 0x28, 0x5d, 0x27, 0x4a, 0x47, 0xd7, - 0x0c, 0xe2, 0xad, 0x8f, 0x5b, 0x11, 0xdd, 0x3a, 0x3f, 0xab, 0x2d, 0xb5, 0xd2, 0x59, 0x70, 0x96, - 0xac, 0xfc, 0xa2, 0x04, 0xb3, 0xf1, 0x0c, 0x98, 0x51, 0xa4, 0x4a, 0x97, 0x2a, 0x52, 0xef, 0x87, - 0x0e, 0x83, 0x5b, 0xc1, 0x87, 0x5e, 0x70, 0x12, 0x07, 0x62, 0x15, 0x66, 0x44, 0x34, 0xf0, 0x88, - 0xa2, 0x4c, 0xf7, 0x77, 0x7f, 0x3f, 0x4a, 0xc6, 0x71, 0x7e, 0xf4, 0x18, 0xa6, 0x6c, 0x5e, 0x77, - 0x7b, 0x00, 0x6e, 0xed, 0x7a, 0x43, 0x00, 0x4c, 0xe1, 0x30, 0x11, 0x47, 0x79, 0x59, 0xdd, 0x1a, - 0x94, 0xa3, 0x1e, 0x40, 0x29, 0x5a, 0xb7, 0xae, 0xc6, 0x19, 0x70, 0x52, 0x06, 0x6d, 0xc3, 0x7c, - 0xdf, 0x48, 0x42, 0xb9, 0xae, 0x7c, 0x4b, 0x40, 0xcd, 0xef, 0x27, 0x59, 0x70, 0x9a, 0x1c, 0x3a, - 0x8c, 0x94, 0xb2, 0xe3, 0x3c, 0x3c, 0xaf, 0xe4, 0x3e, 0x78, 0xb9, 0x6b, 0xd9, 0x94, 0x72, 0xbb, - 0x92, 0xb7, 0xdc, 0x96, 0xff, 0x41, 0x0a, 0x27, 0x21, 0xbf, 0x04, 0x1e, 0xf6, 0xca, 0x94, 0x90, - 0x08, 0x55, 0x47, 0x66, 0x7a, 0xf5, 0xfb, 0x68, 0xa4, 0xea, 0x37, 0x48, 0x9e, 0xc3, 0xcb, 0xdf, - 0xcf, 0x25, 0x58, 0xdc, 0x68, 0x3f, 0xb5, 0xcd, 0xbe, 0xe5, 0x99, 0xb3, 0x6b, 0xb9, 0x4b, 0xf3, - 0x75, 0x28, 0xd9, 0x7d, 0xdd, 0x9b, 0xc7, 0x1b, 0xde, 0x3c, 0x70, 0x5f, 0x67, 0xf3, 0x98, 0x8f, - 0x49, 0xb9, 0x93, 0x60, 0x02, 
0x68, 0x07, 0xc6, 0x6d, 0xc5, 0xe8, 0x12, 0x2f, 0xad, 0xde, 0x1d, - 0x62, 0xfd, 0xe6, 0x3a, 0x66, 0xec, 0xa1, 0xc2, 0x86, 0x4b, 0x63, 0x81, 0x22, 0xff, 0xa3, 0x04, - 0x33, 0xcf, 0xf6, 0xf6, 0x5a, 0x9b, 0x06, 0x3f, 0xd1, 0xfc, 0x6d, 0xf5, 0x0e, 0x94, 0x2c, 0xc5, - 0x39, 0x8a, 0x67, 0x7a, 0x46, 0xc3, 0x9c, 0x82, 0x1e, 0x42, 0x85, 0xfd, 0xcb, 0xec, 0xe2, 0x47, - 0x6a, 0x82, 0x07, 0xc2, 0x4a, 0x4b, 0x8c, 0xbd, 0x0c, 0xfd, 0x1f, 0xfb, 0x9c, 0xe8, 0x3b, 0x50, - 0x66, 0xf1, 0x87, 0x18, 0x9d, 0x9c, 0x05, 0xba, 0x30, 0xaa, 0xe9, 0x0a, 0x05, 0x35, 0x97, 0x18, - 0xc0, 0x1e, 0x9c, 0x7c, 0x0c, 0x0b, 0xa1, 0x49, 0xb0, 0x55, 0x7c, 0xce, 0x72, 0x2a, 0x6a, 0xc3, - 0x18, 0xd3, 0xce, 0x32, 0x67, 0x31, 0xc7, 0x13, 0x68, 0x6c, 0x21, 0x82, 0xfa, 0x88, 0xfd, 0xa2, - 0xd8, 0xc5, 0x92, 0xb7, 0x61, 0x8a, 0x3f, 0x43, 0x9b, 0xb6, 0xc3, 0x17, 0x13, 0xdd, 0x86, 0x62, - 0x4f, 0x33, 0x44, 0x76, 0x9e, 0x14, 0x32, 0x45, 0x96, 0x59, 0xd8, 0x38, 0x27, 0x2b, 0xa7, 0x22, - 0x5e, 0x05, 0x64, 0xe5, 0x14, 0xb3, 0x71, 0xf9, 0x29, 0x94, 0xc5, 0x26, 0x85, 0x81, 0x8a, 0x17, - 0x03, 0x15, 0x53, 0x80, 0x76, 0xa1, 0xbc, 0xd9, 0x6a, 0xea, 0xa6, 0x5b, 0xab, 0xa9, 0x5a, 0xc7, - 0x8e, 0xef, 0xe0, 0xda, 0xe6, 0x3a, 0xc6, 0x9c, 0x82, 0x64, 0x18, 0x27, 0xa7, 0x2a, 0xb1, 0x1c, - 0xee, 0x47, 0x13, 0x4d, 0x60, 0xbe, 0xf1, 0x84, 0x8f, 0x60, 0x41, 0x91, 0xff, 0xb8, 0x00, 0x65, - 0xb1, 0x1c, 0x57, 0x70, 0x77, 0xdb, 0x8a, 0xdc, 0xdd, 0xde, 0xca, 0xe7, 0x1a, 0x99, 0x17, 0xb7, - 0xbd, 0xd8, 0xc5, 0xed, 0x7e, 0x4e, 0xbc, 0x8b, 0x6f, 0x6d, 0x3f, 0x28, 0xc0, 0x74, 0xd4, 0x29, - 0xd1, 0x7b, 0x30, 0xc9, 0xd2, 0x94, 0xa6, 0x92, 0x9d, 0xa0, 0x3a, 0xf6, 0x9f, 0x6e, 0xda, 0x01, - 0x09, 0x87, 0xf9, 0x50, 0xd7, 0x17, 0x63, 0x7e, 0x24, 0x26, 0x9d, 0xbd, 0xa4, 0x7d, 0x47, 0xd3, - 0xeb, 0xee, 0x07, 0x99, 0xfa, 0xa6, 0xe1, 0xec, 0xda, 0x6d, 0xc7, 0xd6, 0x8c, 0x6e, 0x42, 0x11, - 0x77, 0xca, 0x30, 0x32, 0xfa, 0x36, 0x4b, 0x99, 0xd4, 0xec, 0xdb, 0x2a, 0x49, 0x2b, 0x7d, 0xbd, - 0xb2, 0x8d, 0x1d, 0xd0, 0xce, 0x96, 0xa9, 0x2a, 0xba, 0xbb, 0x39, 0x98, 0x1c, 0x12, 0x9b, 0x18, - 0x2a, 0xf1, 0xca, 0x4d, 0x17, 0x02, 0xfb, 0x60, 0xf2, 0xdf, 0x49, 0x30, 0x29, 0xd6, 0xe2, 0x0a, - 0x2e, 0x39, 0xbf, 0x1d, 0xbd, 0xe4, 0xdc, 0xcd, 0x19, 0x39, 0xd2, 0x6f, 0x38, 0x7f, 0x15, 0x98, - 0xce, 0x62, 0x05, 0x3b, 0x2e, 0x47, 0x26, 0x75, 0xe2, 0xc7, 0x85, 0x9d, 0x72, 0xcc, 0x29, 0xa8, - 0x0f, 0xb3, 0x5a, 0x2c, 0xb8, 0x88, 0x3d, 0x6b, 0xe4, 0xb3, 0xc4, 0x17, 0x6b, 0x56, 0x05, 0xfc, - 0x6c, 0x9c, 0x82, 0x13, 0x2a, 0x64, 0x02, 0x09, 0x2e, 0xf4, 0x11, 0x94, 0x8e, 0x1c, 0xc7, 0x4a, - 0x79, 0x3e, 0x1f, 0x12, 0xd2, 0x02, 0x13, 0x2a, 0x7c, 0x76, 0x7b, 0x7b, 0x2d, 0xcc, 0xa1, 0xe4, - 0xbf, 0x2f, 0xf8, 0xeb, 0xc1, 0xef, 0x1c, 0x1f, 0xfa, 0xb3, 0x5d, 0xd3, 0x15, 0x4a, 0xb9, 0x63, - 0xbb, 0xf7, 0xe3, 0x85, 0x90, 0xe1, 0x3e, 0x0d, 0x27, 0xb8, 0xd1, 0x5e, 0x10, 0xea, 0xa5, 0xcb, - 0x84, 0xfa, 0xc9, 0xb4, 0x30, 0x8f, 0x9e, 0x41, 0xd1, 0xd1, 0xf3, 0xde, 0x73, 0x05, 0xe2, 0xde, - 0x56, 0x3b, 0x88, 0x95, 0x7b, 0x5b, 0x6d, 0xcc, 0x20, 0xd0, 0x2e, 0x8c, 0xb1, 0x74, 0xca, 0xa2, - 0x43, 0x31, 0x7f, 0xb4, 0x61, 0x2b, 0x18, 0xb8, 0x14, 0xfb, 0x45, 0xb1, 0x8b, 0x23, 0x7f, 0x0f, - 0xa6, 0x22, 0x21, 0x04, 0x7d, 0x0a, 0xd7, 0x75, 0x53, 0xe9, 0x34, 0x15, 0x5d, 0x31, 0x54, 0xe2, - 0x7d, 0xed, 0xb8, 0x9b, 0x76, 0xf6, 0xb6, 0x42, 0x7c, 0x22, 0x00, 0x2d, 0x08, 0x25, 0xd7, 0xc3, - 0x34, 0x1c, 0x41, 0x94, 0x15, 0x80, 0x60, 0x8e, 0xa8, 0x06, 0x63, 0xcc, 0x53, 0xdd, 0x54, 0x37, - 0xd1, 0x9c, 0x60, 0x16, 0x32, 0x07, 0xa6, 0xd8, 0x1d, 0x47, 0x2b, 0x00, 0x94, 0xa8, 0x36, 0x71, - 0xf8, 0x76, 0x16, 0xa2, 0x5f, 0x4c, 0xdb, 0x3e, 0x05, 
0x87, 0xb8, 0xe4, 0xcf, 0x0b, 0x30, 0xb5, - 0x43, 0x9c, 0xef, 0x9b, 0xf6, 0x71, 0xcb, 0xd4, 0x35, 0x75, 0x70, 0x05, 0x79, 0x00, 0x47, 0xf2, - 0xc0, 0x3b, 0x43, 0x76, 0x26, 0x62, 0x5d, 0x66, 0x36, 0xf8, 0x38, 0x96, 0x0d, 0x56, 0x46, 0x42, - 0xbd, 0x38, 0x27, 0xfc, 0x58, 0x82, 0xa5, 0x08, 0xff, 0x93, 0x20, 0xb0, 0xec, 0xc3, 0x98, 0x65, - 0xda, 0x8e, 0x57, 0x7f, 0x8c, 0x34, 0x19, 0x16, 0xbd, 0x43, 0x15, 0x08, 0x83, 0xc1, 0x2e, 0x1a, - 0xda, 0x82, 0x82, 0x63, 0x8a, 0x63, 0x30, 0x1a, 0x26, 0x21, 0x76, 0x13, 0x04, 0x66, 0x61, 0xcf, - 0xc4, 0x05, 0xc7, 0x94, 0xff, 0x49, 0x82, 0x6a, 0x84, 0x2b, 0x1c, 0x1a, 0x5f, 0xd3, 0x0c, 0x30, - 0x94, 0x0e, 0x6d, 0xb3, 0x77, 0xe9, 0x39, 0xf8, 0x9b, 0xbc, 0x61, 0x9b, 0x3d, 0xcc, 0xb1, 0xe4, - 0x9f, 0x48, 0x30, 0x17, 0xe1, 0xbc, 0x82, 0xb4, 0xf4, 0x51, 0x34, 0x2d, 0xdd, 0x1f, 0x65, 0x22, - 0x19, 0xc9, 0xe9, 0x27, 0x85, 0xd8, 0x34, 0xd8, 0x84, 0xd1, 0x21, 0x4c, 0x5a, 0x66, 0xa7, 0xfd, - 0x0a, 0xbe, 0x9d, 0xce, 0xb0, 0x72, 0xa1, 0x15, 0x60, 0xe1, 0x30, 0x30, 0x3a, 0x85, 0x39, 0x43, - 0xe9, 0x11, 0x6a, 0x29, 0x2a, 0x69, 0xbf, 0x82, 0xd7, 0xa4, 0x1b, 0xfc, 0xe3, 0x4c, 0x1c, 0x11, - 0x27, 0x95, 0xa0, 0x6d, 0x28, 0x6b, 0x16, 0x2f, 0x5f, 0xc5, 0x21, 0x1d, 0x9a, 0xe3, 0xdd, 0x62, - 0xd7, 0xcd, 0x15, 0xe2, 0x07, 0xf6, 0x30, 0xe4, 0x7f, 0x8b, 0x7b, 0x03, 0xaf, 0x86, 0x9e, 0x42, - 0x85, 0x77, 0xb1, 0xa8, 0xa6, 0xee, 0x7d, 0x46, 0xe1, 0x17, 0x17, 0x31, 0xf6, 0xf2, 0xac, 0x76, - 0x2b, 0xe5, 0x85, 0xdc, 0x23, 0x63, 0x5f, 0x18, 0xed, 0x40, 0xc9, 0xfa, 0x65, 0x0a, 0x37, 0x9e, - 0x82, 0x79, 0xb5, 0xc6, 0x71, 0xd0, 0xaf, 0x41, 0x99, 0x18, 0x1d, 0x5e, 0x0b, 0xba, 0x6f, 0x14, - 0x7c, 0x56, 0x4f, 0xdc, 0x21, 0xec, 0xd1, 0xe4, 0x3f, 0x2c, 0xc6, 0x66, 0xc5, 0xf3, 0xf5, 0x67, - 0xaf, 0xcc, 0x39, 0xfc, 0x7a, 0x32, 0xd3, 0x41, 0x0e, 0xa0, 0x2c, 0xb2, 0xbd, 0xf0, 0xf9, 0xaf, - 0x8f, 0xe2, 0xf3, 0xe1, 0x44, 0xea, 0x5f, 0xe7, 0xbc, 0x41, 0x0f, 0x18, 0x7d, 0x17, 0xc6, 0x89, - 0xab, 0xc2, 0x4d, 0xcf, 0x8f, 0x46, 0x51, 0x11, 0x84, 0xdf, 0x20, 0x64, 0x8b, 0x31, 0x81, 0x8a, - 0xbe, 0xc9, 0xd6, 0x8b, 0xf1, 0xb2, 0xaa, 0x97, 0x56, 0x4b, 0x3c, 0x63, 0xde, 0x76, 0xa7, 0xed, - 0x0f, 0xbf, 0x3c, 0xab, 0x41, 0xf0, 0x13, 0x87, 0x25, 0xe4, 0xdf, 0x83, 0xf9, 0x94, 0x14, 0x81, - 0xd4, 0xc8, 0xc3, 0x8a, 0x1b, 0x31, 0x1b, 0xf9, 0xb6, 0x21, 0xff, 0x17, 0xc2, 0x7f, 0x91, 0x60, - 0x8e, 0xef, 0x8e, 0xda, 0xb7, 0x35, 0x67, 0x70, 0x65, 0x79, 0xf9, 0x79, 0x24, 0x2f, 0x3f, 0x1c, - 0xb2, 0x25, 0x09, 0x0b, 0xb3, 0x72, 0xb3, 0xfc, 0x53, 0x09, 0x6e, 0x24, 0xb8, 0xaf, 0x20, 0x74, - 0xef, 0x47, 0x43, 0xf7, 0x3b, 0xa3, 0x4e, 0x28, 0x23, 0x7c, 0xff, 0xf7, 0x5c, 0xca, 0x74, 0xf8, - 0x29, 0x5d, 0x01, 0xb0, 0x6c, 0xed, 0x44, 0xd3, 0x49, 0x57, 0x34, 0x35, 0x54, 0x42, 0x2d, 0x6b, - 0x3e, 0x05, 0x87, 0xb8, 0x10, 0x85, 0xc5, 0x0e, 0x39, 0x54, 0xfa, 0xba, 0xb3, 0xda, 0xe9, 0xac, - 0x29, 0x96, 0x72, 0xa0, 0xe9, 0x9a, 0xa3, 0x89, 0xe7, 0x9f, 0x89, 0xe6, 0x63, 0xb7, 0xd9, 0x20, - 0x8d, 0xe3, 0xe5, 0x59, 0xed, 0x76, 0xda, 0xd7, 0x3e, 0x8f, 0x65, 0x80, 0x33, 0xa0, 0xd1, 0x00, - 0xaa, 0x36, 0xf9, 0x5e, 0x5f, 0xb3, 0x49, 0x67, 0xdd, 0x36, 0xad, 0x88, 0xda, 0x22, 0x57, 0xfb, - 0x5b, 0xe7, 0x67, 0xb5, 0x2a, 0xce, 0xe0, 0x19, 0xae, 0x38, 0x13, 0x1e, 0x7d, 0x06, 0xf3, 0x8a, - 0x68, 0x2e, 0x0c, 0x6b, 0x75, 0x4f, 0xe8, 0xfb, 0xe7, 0x67, 0xb5, 0xf9, 0xd5, 0x24, 0x79, 0xb8, - 0xc2, 0x34, 0x50, 0xd4, 0x80, 0xf2, 0x09, 0xef, 0x43, 0xa4, 0xd5, 0x31, 0x8e, 0xcf, 0x72, 0x55, - 0xd9, 0x6d, 0x4d, 0x64, 0x98, 0xe3, 0x1b, 0x6d, 0x7e, 0xf2, 0x3d, 0x2e, 0x76, 0xd5, 0x67, 0xa5, - 0xb4, 0x38, 0xf9, 0xfc, 0x0b, 0x40, 0x25, 0x88, 0x98, 0xcf, 0x02, 0x12, 0x0e, 
0xf3, 0xa1, 0x4f, - 0x60, 0xe2, 0x48, 0xbc, 0x17, 0xd1, 0x6a, 0x39, 0x57, 0x9d, 0x10, 0x79, 0x5f, 0x6a, 0xce, 0x09, - 0x15, 0x13, 0xde, 0x30, 0xc5, 0x01, 0x22, 0x7a, 0x13, 0xca, 0xfc, 0xc7, 0xe6, 0x3a, 0x7f, 0x5e, - 0xad, 0x04, 0x71, 0xf5, 0x99, 0x3b, 0x8c, 0x3d, 0xba, 0xc7, 0xba, 0xd9, 0x5a, 0xe3, 0xcf, 0xfc, - 0x31, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe8, 0xe8, 0x53, 0x28, 0x53, 0xb2, 0xa5, 0x19, 0xfd, 0xd3, - 0x2a, 0xe4, 0x6a, 0x12, 0x68, 0x3f, 0xe1, 0xdc, 0xb1, 0x87, 0xce, 0x40, 0x83, 0xa0, 0x63, 0x0f, - 0x16, 0x1d, 0xc1, 0x84, 0xdd, 0x37, 0x56, 0xe9, 0x3e, 0x25, 0x76, 0x75, 0x92, 0xeb, 0x18, 0x96, - 0x4a, 0xb0, 0xc7, 0x1f, 0xd7, 0xe2, 0xaf, 0x90, 0xcf, 0x81, 0x03, 0x70, 0x74, 0x04, 0xc0, 0x7f, - 0xf0, 0x37, 0xd5, 0xea, 0x22, 0x57, 0xf5, 0x7e, 0x1e, 0x55, 0x69, 0x4f, 0xb7, 0xe2, 0xbb, 0x8a, - 0x4f, 0xc6, 0x21, 0x6c, 0xf4, 0x47, 0x12, 0x20, 0xda, 0xb7, 0x2c, 0x9d, 0xf4, 0x88, 0xe1, 0x28, - 0x3a, 0x1f, 0xa5, 0xd5, 0xeb, 0x5c, 0xe5, 0x87, 0xc3, 0x56, 0x30, 0x21, 0x18, 0x57, 0xed, 0x7f, - 0x2e, 0x49, 0xb2, 0xe2, 0x14, 0xbd, 0x6c, 0x13, 0x0f, 0xc5, 0xac, 0xa7, 0x72, 0x6d, 0x62, 0xfa, - 0x6b, 0x75, 0xb0, 0x89, 0x82, 0x8e, 0x3d, 0x58, 0xf4, 0x1c, 0x16, 0xbd, 0x86, 0x59, 0x6c, 0x9a, - 0xce, 0x86, 0xa6, 0x13, 0x3a, 0xa0, 0x0e, 0xe9, 0x55, 0xa7, 0xb9, 0x83, 0xf9, 0x5d, 0x43, 0x38, - 0x95, 0x0b, 0x67, 0x48, 0xa3, 0x1e, 0xd4, 0xbc, 0xe0, 0xc4, 0x4e, 0xae, 0x1f, 0x1d, 0x9f, 0x50, - 0x55, 0xd1, 0xdd, 0x2f, 0x48, 0x33, 0x5c, 0xc1, 0x1b, 0xe7, 0x67, 0xb5, 0xda, 0xfa, 0xc5, 0xac, - 0x78, 0x18, 0x16, 0xfa, 0x0e, 0x54, 0x95, 0x2c, 0x3d, 0xb3, 0x5c, 0xcf, 0x57, 0x59, 0xc4, 0xcb, - 0x54, 0x90, 0x29, 0x8d, 0x1c, 0x98, 0x55, 0xa2, 0xad, 0xcb, 0xb4, 0x3a, 0x97, 0xeb, 0x31, 0x3a, - 0xd6, 0xf1, 0x1c, 0xbc, 0x1b, 0xc5, 0x08, 0x14, 0x27, 0x34, 0xa0, 0xdf, 0x07, 0xa4, 0xc4, 0xbb, - 0xad, 0x69, 0x15, 0xe5, 0x4a, 0x74, 0x89, 0x36, 0xed, 0xc0, 0xed, 0x12, 0x24, 0x8a, 0x53, 0xf4, - 0xb0, 0x3b, 0x84, 0x12, 0xeb, 0x10, 0xa7, 0xd5, 0xa5, 0x44, 0x35, 0x74, 0x81, 0x72, 0x5f, 0x2e, - 0xf4, 0xa1, 0x2c, 0x8e, 0x88, 0x93, 0x4a, 0xd0, 0x16, 0x2c, 0x88, 0xc1, 0x7d, 0x83, 0x2a, 0x87, - 0xa4, 0x3d, 0xa0, 0xaa, 0xa3, 0xd3, 0xea, 0x3c, 0x8f, 0xef, 0xfc, 0x63, 0xed, 0x6a, 0x0a, 0x1d, - 0xa7, 0x4a, 0xa1, 0x0f, 0x61, 0xf6, 0xd0, 0xb4, 0x0f, 0xb4, 0x4e, 0x87, 0x18, 0x1e, 0xd2, 0x02, - 0x47, 0xe2, 0xcf, 0x60, 0x1b, 0x31, 0x1a, 0x4e, 0x70, 0x23, 0x0a, 0x37, 0x04, 0x72, 0xcb, 0x36, - 0xd5, 0x6d, 0xb3, 0x6f, 0x38, 0x6e, 0xc9, 0x79, 0xc3, 0x4f, 0xa3, 0x37, 0x56, 0xd3, 0x18, 0x5e, - 0x9e, 0xd5, 0xee, 0xa4, 0x5f, 0x44, 0x02, 0x26, 0x9c, 0x8e, 0x8d, 0x2c, 0xb8, 0x2e, 0xfa, 0xfe, - 0xf9, 0x7b, 0x5c, 0xb5, 0xca, 0x8f, 0xfe, 0x07, 0xc3, 0x03, 0x9e, 0x2f, 0x12, 0x3f, 0xff, 0xb3, - 0xe7, 0x67, 0xb5, 0xeb, 0x61, 0x06, 0x1c, 0xd1, 0xc0, 0xfb, 0xbc, 0xc4, 0xd7, 0xc5, 0xab, 0xe9, - 0x95, 0x1f, 0xad, 0xcf, 0x2b, 0x30, 0xed, 0x95, 0xf5, 0x79, 0x85, 0x20, 0x2f, 0x7e, 0x1d, 0xfa, - 0xaf, 0x02, 0xcc, 0x07, 0xcc, 0xb9, 0xfb, 0xbc, 0x52, 0x44, 0x7e, 0xd5, 0x2f, 0x9f, 0xaf, 0xf7, - 0x2a, 0x58, 0xba, 0xff, 0x7b, 0xbd, 0x57, 0x81, 0x6d, 0x19, 0xb7, 0x87, 0xbf, 0x29, 0x84, 0x27, - 0x30, 0x62, 0x03, 0xd0, 0x2b, 0x68, 0x19, 0xff, 0xd2, 0xf5, 0x10, 0xc9, 0x3f, 0x2d, 0xc2, 0x6c, - 0xfc, 0x34, 0x46, 0xfa, 0x44, 0xa4, 0xa1, 0x7d, 0x22, 0x2d, 0x58, 0x38, 0xec, 0xeb, 0xfa, 0x80, - 0xcf, 0x21, 0xd4, 0x2c, 0xe2, 0x7e, 0xb1, 0xfd, 0xaa, 0x90, 0x5c, 0xd8, 0x48, 0xe1, 0xc1, 0xa9, - 0x92, 0xc9, 0xb6, 0x91, 0xd2, 0x2f, 0xdb, 0x36, 0x32, 0x76, 0x89, 0xb6, 0x91, 0xf4, 0xce, 0x9b, - 0xe2, 0xa5, 0x3a, 0x6f, 0x2e, 0xd3, 0x33, 0x92, 0x12, 0xc4, 0x86, 0xbe, 0x6e, 0x7c, 0x03, 0xa6, - 0xa3, 
0x7d, 0x4c, 0xee, 0x5e, 0xba, 0xad, 0x54, 0xe2, 0xcb, 0x78, 0x68, 0x2f, 0xdd, 0x71, 0xec, - 0x73, 0xc8, 0xe7, 0x12, 0x2c, 0xa6, 0xf7, 0x2b, 0x23, 0x1d, 0xa6, 0x7b, 0xca, 0x69, 0xb8, 0x87, - 0x5c, 0xba, 0xe4, 0xe3, 0x1d, 0x6f, 0x60, 0xd9, 0x8e, 0x60, 0xe1, 0x18, 0x36, 0xfa, 0x18, 0x2a, - 0x3d, 0xe5, 0xb4, 0xdd, 0xb7, 0xbb, 0xe4, 0xd2, 0x8f, 0x84, 0xfc, 0x18, 0x6d, 0x0b, 0x14, 0xec, - 0xe3, 0xc9, 0xbf, 0x90, 0x60, 0x29, 0xa3, 0x2d, 0xe5, 0xff, 0xd1, 0x2c, 0xff, 0x52, 0x82, 0xaf, - 0x64, 0x5e, 0xc3, 0xd0, 0xa3, 0x48, 0x07, 0x8d, 0x1c, 0xeb, 0xa0, 0x41, 0x49, 0xc1, 0xd7, 0xd4, - 0x40, 0xf3, 0xb9, 0x04, 0xd5, 0xac, 0x7b, 0x29, 0x7a, 0x2f, 0x62, 0xe4, 0xd7, 0x62, 0x46, 0xce, - 0x25, 0xe4, 0x5e, 0x93, 0x8d, 0xff, 0x2a, 0xc1, 0xad, 0x0b, 0xea, 0x3b, 0xff, 0xfa, 0x43, 0x3a, - 0x61, 0x2e, 0xfe, 0x6a, 0x2f, 0x3e, 0x27, 0x06, 0xd7, 0x9f, 0x14, 0x1e, 0x9c, 0x29, 0x8d, 0xf6, - 0x61, 0x49, 0xdc, 0xbd, 0xe2, 0x34, 0x51, 0xba, 0xf0, 0x46, 0xc3, 0xf5, 0x74, 0x16, 0x9c, 0x25, - 0x2b, 0xff, 0xb5, 0x04, 0x8b, 0xe9, 0x0f, 0x0e, 0xe8, 0xdd, 0xc8, 0x92, 0xd7, 0x62, 0x4b, 0x3e, - 0x13, 0x93, 0x12, 0x0b, 0xfe, 0x5d, 0x98, 0x16, 0xcf, 0x12, 0x02, 0x46, 0x38, 0xb3, 0x9c, 0x96, - 0x9d, 0x04, 0x84, 0x57, 0x1c, 0xf3, 0x63, 0x12, 0x1d, 0xc3, 0x31, 0x34, 0xf9, 0x07, 0x05, 0x18, - 0x6b, 0xab, 0x8a, 0x4e, 0xae, 0xa0, 0x36, 0xfe, 0x56, 0xa4, 0x36, 0x1e, 0xf6, 0x27, 0x7c, 0xdc, - 0xaa, 0xcc, 0xb2, 0x18, 0xc7, 0xca, 0xe2, 0xb7, 0x72, 0xa1, 0x5d, 0x5c, 0x11, 0xff, 0x26, 0x4c, - 0xf8, 0x4a, 0x47, 0x4b, 0xd4, 0xf2, 0x5f, 0x14, 0x60, 0x32, 0xa4, 0x62, 0xc4, 0x34, 0x7f, 0x18, - 0xa9, 0x6d, 0x8a, 0x39, 0x1e, 0x81, 0x42, 0xba, 0xea, 0x5e, 0x35, 0xe3, 0xb6, 0xa0, 0x07, 0x4d, - 0xc7, 0xc9, 0x22, 0xe7, 0x1b, 0x30, 0xed, 0x28, 0x76, 0x97, 0x38, 0xfe, 0x07, 0x19, 0xb7, 0x45, - 0xce, 0xff, 0x5b, 0x88, 0xbd, 0x08, 0x15, 0xc7, 0xb8, 0x6f, 0x3e, 0x86, 0xa9, 0x88, 0xb2, 0x91, - 0x3a, 0xc8, 0xff, 0x56, 0x82, 0xaf, 0x0d, 0x7d, 0x48, 0x42, 0xcd, 0xc8, 0x21, 0xa9, 0xc7, 0x0e, - 0xc9, 0x72, 0x36, 0xc0, 0xeb, 0xeb, 0x44, 0x6c, 0xae, 0xbd, 0xf8, 0x62, 0xf9, 0xda, 0xcf, 0xbe, - 0x58, 0xbe, 0xf6, 0xf3, 0x2f, 0x96, 0xaf, 0xfd, 0xc1, 0xf9, 0xb2, 0xf4, 0xe2, 0x7c, 0x59, 0xfa, - 0xd9, 0xf9, 0xb2, 0xf4, 0xf3, 0xf3, 0x65, 0xe9, 0x3f, 0xce, 0x97, 0xa5, 0x3f, 0xf9, 0xc5, 0xf2, - 0xb5, 0x8f, 0x6f, 0x5f, 0xf8, 0x27, 0xff, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x34, 0xc8, - 0x81, 0x2b, 0x40, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + // 2858 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3, + 0x46, 0x84, 0x4d, 0xd8, 0xcc, 0xb0, 0x9b, 0x64, 0xc9, 0x87, 0x94, 0xb0, 0xe3, 0xdd, 0x64, 0x9d, + 0xd8, 0xe3, 0x49, 0xcd, 0x38, 0x41, 0x11, 0x01, 0xda, 0x3d, 0xe5, 0x71, 0xc7, 0x3d, 0xdd, 0xa3, + 0xee, 0x1a, 0xb3, 0xbe, 0x81, 0xe0, 0x92, 0x13, 0x5c, 0x02, 0x1c, 0x91, 0x90, 0xb8, 0x72, 0xe5, + 0x10, 0x22, 0x10, 0x41, 0x5a, 0x21, 0x0e, 0x91, 0x38, 0x90, 0x93, 0x45, 0x9c, 0x13, 0xe2, 0x1f, + 0x40, 0x7b, 0x42, 0xf5, 0xd1, 0xd5, 0xdf, 0x76, 0x8f, 0xf1, 0x5a, 0x04, 0x71, 0x5a, 0x4f, 0xbd, + 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0x57, 0x77, 0x9f, 0xf7, 0x6b, + 0x96, 0x5b, 0xdf, 0x1d, 0x6e, 0x11, 0xcf, 0x21, 0x94, 0xf8, 0xf5, 0x3d, 0xe2, 0x74, 0x5d, 0xaf, + 0x2e, 0x05, 0xc6, 0xc0, 0xaa, 0x93, 0x7b, 0x94, 0x38, 0xbe, 0xe5, 0x3a, 0x7e, 0x7d, 0xef, 0xfa, + 0x16, 0xa1, 0xc6, 0xf5, 0x7a, 0x8f, 0x38, 0xc4, 0x33, 0x28, 0xe9, 0xd6, 0x06, 
0x9e, 0x4b, 0x5d, + 0xf4, 0x98, 0x50, 0xaf, 0x19, 0x03, 0xab, 0x16, 0xaa, 0xd7, 0xa4, 0xfa, 0xe2, 0xd3, 0x3d, 0x8b, + 0xee, 0x0c, 0xb7, 0x6a, 0xa6, 0xdb, 0xaf, 0xf7, 0xdc, 0x9e, 0x5b, 0xe7, 0x56, 0x5b, 0xc3, 0x6d, + 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x09, 0xb4, 0x45, 0x3d, 0xd2, 0xb9, 0xe9, 0x7a, 0xa4, 0xbe, 0x97, + 0xea, 0x71, 0xf1, 0xd9, 0x50, 0xa7, 0x6f, 0x98, 0x3b, 0x96, 0x43, 0xbc, 0xfd, 0xfa, 0x60, 0xb7, + 0xc7, 0x1a, 0xfc, 0x7a, 0x9f, 0x50, 0x23, 0xcb, 0xaa, 0x9e, 0x67, 0xe5, 0x0d, 0x1d, 0x6a, 0xf5, + 0x49, 0xca, 0xe0, 0xe6, 0x71, 0x06, 0xbe, 0xb9, 0x43, 0xfa, 0x46, 0xca, 0xee, 0x99, 0x3c, 0xbb, + 0x21, 0xb5, 0xec, 0xba, 0xe5, 0x50, 0x9f, 0x7a, 0x49, 0x23, 0xfd, 0x83, 0x12, 0x4c, 0xde, 0x36, + 0x48, 0xdf, 0x75, 0xda, 0x84, 0xa2, 0xef, 0x41, 0x95, 0x0d, 0xa3, 0x6b, 0x50, 0x63, 0x41, 0x7b, + 0x5c, 0xbb, 0x3a, 0x75, 0xe3, 0xeb, 0xb5, 0x70, 0x9a, 0x15, 0x6a, 0x6d, 0xb0, 0xdb, 0x63, 0x0d, + 0x7e, 0x8d, 0x69, 0xd7, 0xf6, 0xae, 0xd7, 0x36, 0xb6, 0xde, 0x23, 0x26, 0x5d, 0x27, 0xd4, 0x68, + 0xa0, 0xfb, 0x07, 0xcb, 0xe7, 0x0e, 0x0f, 0x96, 0x21, 0x6c, 0xc3, 0x0a, 0x15, 0x35, 0x61, 0xcc, + 0x1f, 0x10, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5c, 0xc4, 0x9a, 0xf2, 0xac, 0x3d, 0x20, + 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc1, 0xb8, 0x4f, 0x0d, 0x3a, + 0xf4, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8, 0xf8, 0x8d, + 0x25, 0x9a, 0xfe, 0x8f, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, 0x5e, + 0x84, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0x75, 0xf6, 0x07, 0xe4, + 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, 0xfd, + 0x6c, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, 0x20, + 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0xa7, 0x8a, 0x2d, + 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e, 0x80, 0x71, + 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, 0x7a, + 0x12, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, 0x2e, + 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xed, 0x54, + 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, 0x18, + 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, 0xa6, + 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0x67, 0x63, 0x11, 0xf7, 0x59, 0x68, + 0xa2, 0x77, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0xcf, 0x14, 0x74, 0xdf, 0xd8, + 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37, 0xa1, 0x4a, + 0x49, 0x7f, 0x60, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xe5, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, 0x96, + 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, 0x19, + 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, 0xc6, + 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, 0xe5, + 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, 0x00, + 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0xd7, 0x01, 0x05, 0xc3, 0x78, 0x4d, 0x24, 0x37, 0xcb, + 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, 0x8f, + 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, 0x38, + 0xf7, 
0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xf3, 0x71, 0x98, + 0x4d, 0xe4, 0x1b, 0xf4, 0x16, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, 0x88, + 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, 0x79, + 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, 0xc4, + 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, 0x6e, + 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x07, 0x53, 0xa2, 0x37, + 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, 0x77, + 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, 0x89, + 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, 0xb8, + 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, 0x38, + 0xa9, 0x8f, 0x5e, 0x83, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2a, 0x41, + 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x5e, 0x84, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, 0x2b, + 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, 0x22, + 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, 0xc8, + 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x35, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0x95, 0x58, + 0x89, 0xfd, 0x5a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, 0x2a, + 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xee, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, 0x1e, + 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xff, 0xa2, 0x04, 0x70, 0x9b, 0x0c, 0x6c, 0x77, + 0xbf, 0x4f, 0x9c, 0xb3, 0xe0, 0x50, 0x1b, 0x31, 0x0e, 0xf5, 0xf4, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, + 0x25, 0x51, 0x6f, 0x27, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14, + 0x2a, 0x87, 0x34, 0xea, 0xa5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x0f, 0x8d, + 0x47, 0xbd, 0x07, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, 0xaa, + 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa4, 0xc1, + 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x64, 0xe1, 0x10, 0xcd, 0x61, 0x6d, + 0xff, 0x62, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x71, 0x18, 0x73, 0x8c, + 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0x3e, 0xd0, 0x00, 0xc9, 0x2a, + 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, 0x6d, + 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, 0x00, + 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xba, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, + 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, + 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91, + 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x62, 0xe9, 0x79, 0x4d, 0xff, 0xb0, 0x12, 0x8d, 0x1d, 0xce, 0x98, + 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3, + 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0x87, 0xcb, 0xad, 0xcb, 0xa7, 0xc3, 0xad, 0xbf, 0x0b, 0x55, 0x3f, + 0x60, 0xd5, 0x63, 0x1c, 0xf2, 0xfa, 0x08, 0xf9, 0x55, 0x12, 0x6a, 0xd5, 0x81, 0xa2, 0xd2, 0x0a, + 0x34, 0x8b, 0x44, 0x57, 0x46, 0x24, 0xd1, 0xa7, 0x4a, 0x7c, 0x59, 0xbe, 0x19, 0x18, 0x43, 0x9f, + 0x74, 0x79, 0x6e, 0xab, 0x86, 
0xf9, 0xa6, 0xc5, 0x5b, 0xb1, 0x94, 0xa2, 0x77, 0x63, 0x21, 0x5b, + 0x3d, 0x49, 0xc8, 0xce, 0xe4, 0x87, 0x2b, 0xda, 0x84, 0xf9, 0x81, 0xe7, 0xf6, 0x3c, 0xe2, 0xfb, + 0xb7, 0x89, 0xd1, 0xb5, 0x2d, 0x87, 0x04, 0xf3, 0x23, 0x18, 0xd1, 0x95, 0xc3, 0x83, 0xe5, 0xf9, + 0x56, 0xb6, 0x0a, 0xce, 0xb3, 0xd5, 0xef, 0x8f, 0xc1, 0x85, 0x64, 0x05, 0xcc, 0x21, 0xa9, 0xda, + 0x89, 0x48, 0xea, 0xb5, 0xc8, 0x66, 0x10, 0x0c, 0x5e, 0xad, 0x7e, 0xc6, 0x86, 0xb8, 0x05, 0xb3, + 0x32, 0x1b, 0x04, 0x42, 0x49, 0xd3, 0xd5, 0xea, 0x6f, 0xc6, 0xc5, 0x38, 0xa9, 0x8f, 0x5e, 0x82, + 0x69, 0x8f, 0xf3, 0xee, 0x00, 0x40, 0x70, 0xd7, 0x47, 0x24, 0xc0, 0x34, 0x8e, 0x0a, 0x71, 0x5c, + 0x97, 0xf1, 0xd6, 0x90, 0x8e, 0x06, 0x00, 0x63, 0x71, 0xde, 0x7a, 0x2b, 0xa9, 0x80, 0xd3, 0x36, + 0x68, 0x1d, 0x2e, 0x0d, 0x9d, 0x34, 0x94, 0x08, 0xe5, 0x2b, 0x12, 0xea, 0xd2, 0x66, 0x5a, 0x05, + 0x67, 0xd9, 0xa1, 0xed, 0x18, 0x95, 0x1d, 0xe7, 0xe9, 0xf9, 0x46, 0xe1, 0x8d, 0x57, 0x98, 0xcb, + 0x66, 0xd0, 0xed, 0x6a, 0x51, 0xba, 0xad, 0xff, 0x41, 0x8b, 0x16, 0x21, 0x45, 0x81, 0x8f, 0xbb, + 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, 0x79, + 0x3c, 0xfd, 0xfd, 0xa3, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, 0xa0, + 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0b, 0x55, + 0xf6, 0x2f, 0x73, 0x9c, 0x87, 0xeb, 0x24, 0x4f, 0x32, 0xd5, 0x96, 0x6c, 0x7b, 0x10, 0xf9, 0x1b, + 0x2b, 0x4d, 0xf4, 0x2d, 0x98, 0x60, 0x7b, 0x9b, 0x38, 0xdd, 0x82, 0xe4, 0x57, 0x3a, 0xd5, 0x10, + 0x46, 0x21, 0x9f, 0x91, 0x0d, 0x38, 0x80, 0xd3, 0x77, 0x61, 0x2e, 0x32, 0x08, 0x3c, 0xb4, 0xc9, + 0x5b, 0xac, 0x5e, 0xa1, 0x36, 0x54, 0x58, 0xef, 0xac, 0x2a, 0x95, 0x0b, 0x5c, 0x2f, 0x26, 0x26, + 0x22, 0xe4, 0x1e, 0xec, 0x97, 0x8f, 0x05, 0x96, 0xbe, 0x01, 0x13, 0xab, 0xad, 0x86, 0xed, 0x0a, + 0xbe, 0x61, 0x5a, 0x5d, 0x2f, 0x39, 0x53, 0x2b, 0xab, 0xb7, 0x31, 0xe6, 0x12, 0xa4, 0xc3, 0x38, + 0xb9, 0x67, 0x92, 0x01, 0xe5, 0x14, 0x63, 0xb2, 0x01, 0x2c, 0x91, 0xde, 0xe1, 0x2d, 0x58, 0x4a, + 0xf4, 0x9f, 0x94, 0x60, 0x42, 0x76, 0x7b, 0x06, 0xe7, 0x8f, 0xb5, 0xd8, 0xf9, 0xe3, 0xa9, 0x62, + 0x4b, 0x90, 0x7b, 0xf8, 0xe8, 0x24, 0x0e, 0x1f, 0xd7, 0x0a, 0xe2, 0x1d, 0x7d, 0xf2, 0x78, 0xbf, + 0x04, 0x33, 0xf1, 0xc5, 0x47, 0xcf, 0xc1, 0x14, 0x4b, 0xb5, 0x96, 0x49, 0x9a, 0x21, 0xc3, 0x53, + 0xd7, 0x0f, 0xed, 0x50, 0x84, 0xa3, 0x7a, 0xa8, 0xa7, 0xcc, 0x5a, 0xae, 0x47, 0xe5, 0xa0, 0xf3, + 0xa7, 0x74, 0x48, 0x2d, 0xbb, 0x26, 0x2e, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, 0xb3, + 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x6d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, 0x99, + 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, 0x4c, + 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x56, 0x83, 0x29, + 0x39, 0x17, 0x67, 0x40, 0xd4, 0xdf, 0x88, 0x13, 0xf5, 0x27, 0x0a, 0xee, 0xd0, 0x6c, 0x96, 0xfe, + 0x3b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, 0xfa, + 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, 0x45, + 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, 0xb4, + 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, 0x32, + 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa8, 0xc1, 0xa3, 0x19, 0xfe, 0x4b, 0xd2, + 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xa1, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, 0xb6, + 0x20, 0x85, 0x05, 0xd0, 0xfa, 0xaf, 0x34, 0xb8, 0x98, 
0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, 0x92, + 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x1b, 0x50, 0xe5, 0x6f, 0x44, 0xa6, 0x6b, 0xcb, + 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, 0x31, + 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, 0x58, + 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc3, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, 0x18, + 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, 0xd3, + 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, 0xe8, + 0x4d, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0x71, 0xd9, 0x7f, 0x4c, 0x91, 0x08, 0x5d, 0xa8, 0xf2, 0xd1, + 0x75, 0x3a, 0x2d, 0xcc, 0xa1, 0xf4, 0xdf, 0x97, 0xd4, 0x7c, 0xf0, 0x13, 0xd2, 0x37, 0xd5, 0x68, + 0x57, 0x6c, 0xc3, 0xf7, 0x79, 0x0a, 0x13, 0xa7, 0xf9, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, + 0xa8, 0x13, 0x16, 0x4f, 0xed, 0x24, 0xc5, 0x73, 0x2a, 0xab, 0x70, 0xa2, 0xbb, 0x50, 0xa6, 0x76, + 0xd1, 0x53, 0xb9, 0x44, 0xec, 0xac, 0xb5, 0x1b, 0x53, 0x72, 0xca, 0xcb, 0x9d, 0xb5, 0x36, 0x66, + 0x10, 0x68, 0x03, 0x2a, 0xde, 0xd0, 0x26, 0xac, 0x0e, 0x94, 0x8b, 0xd7, 0x15, 0x36, 0x83, 0xe1, + 0xe6, 0x63, 0xbf, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x69, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, 0xf3, + 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x7e, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, 0x7c, + 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, 0x2f, + 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, 0x4c, + 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xdd, 0x24, 0xf4, 0xfb, + 0xae, 0xb7, 0xdb, 0x72, 0x6d, 0xcb, 0xdc, 0x3f, 0x03, 0x12, 0x80, 0x63, 0x24, 0xe0, 0xb8, 0x7c, + 0x19, 0xf3, 0x2e, 0x8f, 0x0a, 0xe8, 0x1f, 0x69, 0x30, 0x1f, 0xd3, 0xbc, 0x13, 0xe6, 0x03, 0x95, + 0xa0, 0xb5, 0x42, 0x09, 0x3a, 0x06, 0xc3, 0x92, 0x5a, 0x76, 0x82, 0x46, 0x6b, 0x50, 0xa2, 0xae, + 0x8c, 0xde, 0xd1, 0x30, 0x09, 0xf1, 0xc2, 0x9a, 0xd3, 0x71, 0x71, 0x89, 0xba, 0x6c, 0x21, 0x16, + 0x62, 0x5a, 0xd1, 0x8c, 0xf6, 0x90, 0x46, 0x80, 0x61, 0x6c, 0xdb, 0x73, 0xfb, 0x27, 0x1e, 0x83, + 0x5a, 0x88, 0x57, 0x3d, 0xb7, 0x8f, 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, + 0x1b, 0xde, 0x8c, 0xf3, 0x86, 0x6b, 0xa3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, + 0x0d, 0x18, 0x6d, 0xc3, 0xd4, 0xc0, 0xed, 0xb6, 0x4f, 0xe1, 0x81, 0x76, 0x96, 0xf1, 0xb9, 0x56, + 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x1e, 0x5c, 0x64, 0xd4, 0xc2, 0x1f, 0x18, 0x26, 0x69, 0x9f, 0xc2, + 0x95, 0xd5, 0x23, 0xfc, 0x05, 0x28, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, + 0x7c, 0x21, 0x89, 0xe4, 0xb1, 0x24, 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, + 0xff, 0x35, 0x19, 0x0d, 0x9c, 0xae, 0xbe, 0x16, 0xa1, 0x07, 0xf2, 0xad, 0xe6, 0x64, 0xd4, 0xa0, + 0x29, 0x99, 0xc8, 0x49, 0x99, 0x75, 0x35, 0xc1, 0x5b, 0xbe, 0x02, 0x13, 0xc4, 0xe9, 0x72, 0xb2, + 0x2e, 0x2e, 0x42, 0xf8, 0xa8, 0xee, 0x88, 0x26, 0x1c, 0xc8, 0xf4, 0x1f, 0x97, 0x13, 0xa3, 0xe2, + 0x65, 0xf6, 0xbd, 0x53, 0x0b, 0x0e, 0x45, 0xf8, 0x73, 0x03, 0x64, 0x2b, 0xa4, 0x7f, 0x22, 0xe6, + 0xbf, 0x31, 0x4a, 0xcc, 0x47, 0xeb, 0x5f, 0x2e, 0xf9, 0x43, 0xdf, 0x81, 0x71, 0x22, 0xba, 0x10, + 0x55, 0xf5, 0xe6, 0x28, 0x5d, 0x84, 0xe9, 0x37, 0x3c, 0x67, 0xc9, 0x36, 0x89, 0x8a, 0x5e, 0x61, + 0xf3, 0xc5, 0x74, 0xd9, 0xb1, 0x44, 0xb0, 0xe7, 0xc9, 0xc6, 0x63, 0x62, 0xd8, 0xaa, 0xf9, 0xc1, + 0xc1, 0x32, 0x84, 0x3f, 0x71, 0xd4, 0x82, 0xbf, 0x9e, 0xc9, 0x3b, 0x9b, 0xb3, 
0xf9, 0x02, 0x69, + 0xb4, 0xd7, 0xb3, 0xd0, 0xb5, 0x53, 0x7b, 0x3d, 0x8b, 0x40, 0x1e, 0x7d, 0x86, 0xfd, 0x67, 0x09, + 0x2e, 0x85, 0xca, 0x85, 0x5f, 0xcf, 0x32, 0x4c, 0xfe, 0xff, 0x15, 0x52, 0xb1, 0x17, 0xad, 0x70, + 0xea, 0xfe, 0xfb, 0x5e, 0xb4, 0x42, 0xdf, 0x72, 0xaa, 0xdd, 0x6f, 0x4a, 0xd1, 0x01, 0x8c, 0xf8, + 0xac, 0x72, 0x0a, 0x1f, 0xe2, 0x7c, 0xe1, 0x5e, 0x66, 0xf4, 0xbf, 0x94, 0xe1, 0x42, 0x72, 0x37, + 0xc6, 0x6e, 0xdf, 0xb5, 0x63, 0x6f, 0xdf, 0x5b, 0x30, 0xb7, 0x3d, 0xb4, 0xed, 0x7d, 0x3e, 0x86, + 0xc8, 0x15, 0xbc, 0xb8, 0xb7, 0xff, 0x92, 0xb4, 0x9c, 0x7b, 0x35, 0x43, 0x07, 0x67, 0x5a, 0xa6, + 0x2f, 0xe3, 0xc7, 0xfe, 0xd3, 0xcb, 0xf8, 0xca, 0x09, 0x2e, 0xe3, 0xb3, 0xdf, 0x33, 0xca, 0x27, + 0x7a, 0xcf, 0x38, 0xc9, 0x4d, 0x7c, 0x46, 0x12, 0x3b, 0xf6, 0xab, 0x92, 0x97, 0x61, 0x26, 0xfe, + 0x3a, 0x24, 0xd6, 0x52, 0x3c, 0x50, 0xc9, 0xb7, 0x98, 0xc8, 0x5a, 0x8a, 0x76, 0xac, 0x34, 0xf4, + 0x43, 0x0d, 0x2e, 0x67, 0x7f, 0x05, 0x82, 0x6c, 0x98, 0xe9, 0x1b, 0xf7, 0xa2, 0x5f, 0xe6, 0x68, + 0x27, 0x64, 0x2b, 0xfc, 0x59, 0x60, 0x3d, 0x86, 0x85, 0x13, 0xd8, 0xe8, 0x1d, 0xa8, 0xf6, 0x8d, + 0x7b, 0xed, 0xa1, 0xd7, 0x23, 0x27, 0x66, 0x45, 0x7c, 0x1b, 0xad, 0x4b, 0x14, 0xac, 0xf0, 0xf4, + 0xcf, 0x35, 0x98, 0xcf, 0xb9, 0xec, 0xff, 0x1f, 0x1a, 0xe5, 0xfb, 0x25, 0xa8, 0xb4, 0x4d, 0xc3, + 0x26, 0x67, 0x40, 0x28, 0x5e, 0x8f, 0x11, 0x8a, 0xe3, 0xbe, 0x26, 0xe5, 0x5e, 0xe5, 0x72, 0x09, + 0x9c, 0xe0, 0x12, 0x4f, 0x15, 0x42, 0x3b, 0x9a, 0x46, 0xbc, 0x00, 0x93, 0xaa, 0xd3, 0xd1, 0xb2, + 0x9b, 0xfe, 0xcb, 0x12, 0x4c, 0x45, 0xba, 0x18, 0x31, 0x37, 0x6e, 0xc7, 0x0a, 0x42, 0xb9, 0xc0, + 0x4d, 0x4b, 0xa4, 0xaf, 0x5a, 0x50, 0x02, 0xc4, 0xd7, 0x10, 0xe1, 0xfb, 0x77, 0xba, 0x32, 0xbc, + 0x0c, 0x33, 0xd4, 0xf0, 0x7a, 0x84, 0x2a, 0xda, 0x2e, 0x2e, 0x19, 0xd5, 0x67, 0x39, 0x9d, 0x98, + 0x14, 0x27, 0xb4, 0x17, 0x5f, 0x82, 0xe9, 0x58, 0x67, 0xa3, 0x7c, 0xcc, 0xd0, 0x58, 0xb9, 0xff, + 0xd9, 0xd2, 0xb9, 0x4f, 0x3e, 0x5b, 0x3a, 0xf7, 0xe9, 0x67, 0x4b, 0xe7, 0x7e, 0x70, 0xb8, 0xa4, + 0xdd, 0x3f, 0x5c, 0xd2, 0x3e, 0x39, 0x5c, 0xd2, 0x3e, 0x3d, 0x5c, 0xd2, 0xfe, 0x7e, 0xb8, 0xa4, + 0xfd, 0xf4, 0xf3, 0xa5, 0x73, 0xef, 0x3c, 0x76, 0xe4, 0xff, 0x6d, 0xf8, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf3, 0x1c, 0xa0, 0x16, 0x14, 0x31, 0x00, 0x00, +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1963,25 +1556,50 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, 
nil } -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1991,167 +1609,50 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2788,48 +2289,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2912,64 +2371,6 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() 
- return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3157,7 +2558,7 @@ func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressRule) Marshal() (dAtA []byte, err error) { +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3167,35 +2568,44 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) i-- dAtA[i] = 0x12 - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3205,28 +2615,141 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil 
{ + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } @@ -3390,16 +2913,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3748,7 +3261,7 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3758,34 +3271,50 @@ func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; 
iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3795,18 +3324,28 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3814,21 +3353,21 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3838,12 +3377,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3875,7 +3414,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3885,19 +3424,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3905,14 +3457,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) i-- - dAtA[i] = 0xc2 + dAtA[i] = 0x8 } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3920,246 +3498,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + dAtA[i] = 0x32 } } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - 
copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 - } - i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4169,12 +3649,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) 
{ return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4212,7 +3692,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4222,50 +3702,23 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4275,687 +3728,608 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x12 } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err 
+func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + 
} + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) } - return dAtA[:n], nil + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := 
len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x12 } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e 
:= range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressPortStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.TLS) > 0 { + for _, e := range 
m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) } - dAtA[offset] = uint8(v) - return base + return n } -func (m *AllowedCSIDriver) Size() (n int) { + +func (m *IngressStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *IngressTLS) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedHostPath) Size() (n int) { +func (m *NetworkPolicy) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 2 return n } -func (m *DaemonSet) Size() (n int) { +func (m *NetworkPolicyEgressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *NetworkPolicyIngressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyList) Size() (n int) { if m == nil { return 0 } @@ -4972,70 +4346,77 @@ func (m *DaemonSetList) Size() (n int) { 
return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) } return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *NetworkPolicySpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *Deployment) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -5050,7 +4431,7 @@ func (m *Deployment) Size() (n int) { return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -5060,18 +4441,16 @@ func (m *DeploymentCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -5088,28 +4467,7 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil { return 0 } @@ -5124,151 +4482,75 @@ func (m *DeploymentSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } return n } -func (m *DeploymentStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - return n -} - -func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *HTTPIngressPath) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) return n } -func (m *IDRange) Size() (n int) { +func (m 
*RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } @@ -5283,3625 +4565,713 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *IngressList) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", 
this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 +func (this 
*DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *Deployment) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + 
fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 +func (this *IngressList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - 
n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - n += 3 + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) + repeatedStringForPorts 
+= "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range 
m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s } - -func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + 
l + sovGenerated(uint64(l)) - } +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = 
len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - 
`TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," } repeatedStringForConditions += "}" - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s } -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil 
{ - return "nil" - } - repeatedStringForItems := "[]Deployment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f 
:= range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += 
strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, - `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", 
this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, 
``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), 
"PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - 
} - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen 
< 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex 
< 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - 
if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 
0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } 
- if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s } -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8924,104 +5294,15 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9048,72 +5329,13 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: + case 2: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9140,18 +5362,15 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9161,12 +5380,25 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ProgressDeadlineSeconds = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9188,7 +5420,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9211,17 +5443,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.ObservedGeneration = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9231,54 +5463,29 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if postIndex > l { + return io.ErrUnexpectedEOF } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.AvailableReplicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9288,33 +5495,27 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 6: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9341,103 +5542,13 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - 
return nil -} -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9465,13 +5576,13 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9481,27 +5592,23 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9524,7 +5631,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9547,17 +5654,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9567,27 +5674,28 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9614,8 +5722,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9640,7 +5748,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9663,17 +5771,17 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9683,27 +5791,31 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9730,15 +5842,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9748,25 +5860,83 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) + } + m.TemplateGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TemplateGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9788,7 +5958,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9798,28 +5968,200 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowGenerated } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9846,8 +6188,8 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9872,7 +6214,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostPortRange) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9895,17 +6237,17 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Min = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9915,16 +6257,29 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - m.Max = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9934,11 +6289,28 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9960,7 +6332,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m 
*Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9983,36 +6355,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - m.Max = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10022,66 +6375,30 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10091,29 +6408,30 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CIDR = string(dAtA[iNdEx:postIndex]) + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10123,23 +6441,24 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10162,7 +6481,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10185,17 +6504,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10205,30 +6524,29 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10238,30 +6556,29 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10271,78 +6588,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10370,11 +6636,11 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10401,13 +6667,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10434,10 +6700,7 @@ func (m *IngressBackend) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10462,7 +6725,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10485,10 +6748,10 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10553,7 +6816,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) + m.Items = append(m.Items, Deployment{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -10579,7 +6842,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10602,15 +6865,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10638,11 +6901,11 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10669,63 +6932,107 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10752,10 +7059,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10780,7 +7084,7 @@ func (m *IngressRuleValue) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10803,15 +7107,35 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10838,16 +7162,16 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10874,14 +7198,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10908,16 +7231,74 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10927,25 +7308,48 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10967,7 +7371,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10986,19 +7390,114 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11025,10 +7524,50 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11050,7 +7589,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11073,15 +7612,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: 
DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11109,13 +7648,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11125,23 +7664,27 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11164,7 +7707,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11187,17 +7730,17 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11207,28 +7750,27 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11255,15 +7797,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11273,24 +7815,24 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11313,7 +7855,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11336,49 +7878,15 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11405,8 +7913,8 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := 
m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11431,7 +7939,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11454,17 +7962,17 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11474,31 +7982,29 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11508,25 +8014,23 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -11549,7 +8053,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11572,15 +8076,48 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11607,13 +8144,13 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11640,8 +8177,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11666,7 +8202,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11689,17 +8225,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11709,31 +8245,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if 
intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11760,16 +8292,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11796,10 +8325,10 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11824,7 +8353,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11847,17 +8376,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11867,28 +8396,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11915,33 +8444,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11963,7 +8470,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11986,17 +8493,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12006,30 +8513,29 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12039,29 +8545,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if 
postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12088,16 +8592,66 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12107,23 +8661,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12146,7 +8702,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12169,17 +8725,36 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType 
:= int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12189,25 +8764,56 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12230,7 +8836,7 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12253,17 +8859,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", 
wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12273,28 +8879,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12321,7 +8926,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12346,7 +8951,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12369,15 +8974,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12404,41 +9009,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12463,7 +9037,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) 
error { } return nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12486,37 +9060,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12526,29 +9080,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12558,29 +9116,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + 
var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12590,27 +9150,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12638,105 +9200,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + if iNdEx >= 
l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.HostIPC = bool(v != 0) - case 10: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12763,48 +9282,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 12: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12814,30 +9350,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + 
postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12847,90 +9382,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12957,14 +9479,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12991,16 +9512,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13010,29 +9580,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 20: + case 2: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13042,29 +9614,81 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 21: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13074,27 +9698,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13121,16 +9747,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13157,14 +9831,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13191,10 +9864,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13219,7 +9890,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13242,15 +9913,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13277,13 +9948,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13310,13 +9984,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13343,7 +10020,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13368,7 +10048,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13391,15 +10071,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13427,43 +10107,12 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13490,47 +10139,18 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.Port == nil { + m.Port = &intstr.IntOrString{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13540,24 +10160,12 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13579,7 +10187,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13602,15 +10210,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13637,13 +10245,13 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13670,11 +10278,77 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13696,7 +10370,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13719,17 +10393,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13739,15 +10413,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13774,16 +10461,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13810,29 +10494,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13854,7 +10519,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13877,17 +10542,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13897,35 +10562,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= 
int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13935,35 +10594,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13973,16 +10626,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13992,81 +10659,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14076,11 +10691,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14102,7 +10730,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14125,15 +10753,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14160,16 
+10788,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14196,10 +10821,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14224,7 +10847,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14247,15 +10870,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14282,16 +10925,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14318,13 +10961,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14346,7 +11005,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14369,17 +11028,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14389,27 +11105,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14436,8 +11158,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14462,7 +11184,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14485,49 +11207,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14537,26 +11227,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14578,7 +11253,7 @@ func (m *RunAsUserStrategyOptions) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14601,17 +11276,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14621,29 +11296,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14653,24 +11332,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14693,7 +11375,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14716,17 +11398,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for 
non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14736,27 +11418,31 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14783,10 +11469,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15257,122 +11943,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule 
= SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index eaa63b59f..3f2549681 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,37 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/extensions/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. @@ -398,19 +367,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. 
- // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -453,38 +409,17 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional repeated string except = 2; @@ -540,6 +475,53 @@ message IngressList { repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // Ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // +optional + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // Port is the port number of the ingress port. + optional int32 port = 1; + + // Protocol is the protocol of the ingress port. 
+ // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. @@ -632,7 +614,7 @@ message IngressSpec { message IngressStatus { // LoadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. @@ -664,11 +646,6 @@ message NetworkPolicy { // Specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional NetworkPolicyStatus status = 3; } // DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. @@ -769,8 +746,6 @@ message NetworkPolicyPort { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional optional int32 endPort = 3; } @@ -818,176 +793,6 @@ message NetworkPolicySpec { repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. -message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. 
-message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. 
- // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -1076,7 +881,7 @@ message ReplicaSetSpec { // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -1182,57 +987,6 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -1260,7 +1014,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -1275,16 +1029,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0bc..d58908edc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index c63cf0d67..70b349f65 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -35,7 +35,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -683,7 +683,54 @@ type IngressTLS struct { type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // +optional + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // Ports provides information about the ports exposed by this LoadBalancer. 
+ // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // Port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // Protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to @@ -920,7 +967,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -974,389 +1021,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. 
You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. 
- // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. 
- // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. 
- Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. 
- RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. 
-// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 @@ -1377,10 +1041,10 @@ type NetworkPolicy struct { // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! 
+ // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. @@ -1498,22 +1162,20 @@ type NetworkPolicyPort struct { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` @@ -1545,48 +1207,6 @@ type NetworkPolicyPeer struct { IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } -// NetworkPolicyConditionType is the type for status conditions on -// a NetworkPolicy. This type should be used with the -// NetworkPolicyStatus.Conditions field. -type NetworkPolicyConditionType string - -const ( - // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by - // the Network Policy provider and will be implemented in the cluster - NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted" - - // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially - // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some - // other condition - NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure" - - // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the - // Network Policy provider and will not be implemented in the cluster - NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure" -) - -// NetworkPolicyConditionReason defines the set of reasons that explain why a -// particular NetworkPolicy condition type has been raised. -type NetworkPolicyConditionReason string - -const ( - // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been - // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider - NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" -) - -// NetworkPolicyStatus describe the current state of the NetworkPolicy. 
-type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index cf924b589..408022c9d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,30 +212,10 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ - "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -312,6 +254,37 @@ func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based.", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", + "ports": "Ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "Port is the port number of the ingress port.", + "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. 
If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", @@ -365,7 +338,6 @@ var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -417,7 +389,7 @@ var map_NetworkPolicyPort = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", + "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -436,67 +408,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } -var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", -} - -func (NetworkPolicyStatus) SwaggerDoc() map[string]string { - return map_NetworkPolicyStatus -} - -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -545,7 +456,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "The number of ready replicas for this replica set.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -586,46 +497,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -649,7 +520,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. 
The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -657,14 +528,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 9c1c8421c..6b474ae48 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -637,6 +536,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. +func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in @@ -759,7 +725,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } @@ -972,184 +937,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus. -func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { - if in == nil { - return nil - } - out := new(NetworkPolicyStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1346,95 +1133,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1501,24 +1199,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. -func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 963aaffba..5c9354228 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { return 1, 16 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
-func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go index a45e23e23..b54e1ceef 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{0} + return fileDescriptor_45ba024d525b289b, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{1} + return fileDescriptor_45ba024d525b289b, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ 
var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{2} + return fileDescriptor_45ba024d525b289b, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{3} + return fileDescriptor_45ba024d525b289b, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{4} + return fileDescriptor_45ba024d525b289b, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{5} + return fileDescriptor_45ba024d525b289b, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{6} + return fileDescriptor_45ba024d525b289b, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{7} + return fileDescriptor_45ba024d525b289b, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{8} + return fileDescriptor_45ba024d525b289b, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{9} + return fileDescriptor_45ba024d525b289b, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var 
xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{10} + return fileDescriptor_45ba024d525b289b, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{11} + return fileDescriptor_45ba024d525b289b, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{12} + return fileDescriptor_45ba024d525b289b, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{13} + return fileDescriptor_45ba024d525b289b, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{14} + return fileDescriptor_45ba024d525b289b, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{15} + return fileDescriptor_45ba024d525b289b, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{16} + return fileDescriptor_45ba024d525b289b, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{17} + return fileDescriptor_45ba024d525b289b, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{18} + return fileDescriptor_45ba024d525b289b, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{19} + return fileDescriptor_45ba024d525b289b, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{20} + return fileDescriptor_45ba024d525b289b, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{21} + return fileDescriptor_45ba024d525b289b, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration") proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") @@ -689,102 +718,142 @@ func init() { } var fileDescriptor_45ba024d525b289b = []byte{ - // 1505 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, - 0x16, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0xcf, 0x8c, 0x13, 0x58, 0x70, 0x00, 0xc9, 0xe1, 0x05, 0x6e, - 0x72, 0x6f, 0x12, 0x32, 0xce, 0x4d, 0x72, 0x53, 0x04, 0x45, 0x60, 0x3a, 0x69, 0x5e, 0xb6, 0x6b, - 0x8f, 0x92, 0x14, 0x0d, 0x52, 0x20, 0x34, 0x35, 0x96, 0x26, 0x96, 0x48, 0x76, 0x86, 0x54, 0xea, - 0x22, 0x8b, 0x02, 0xfd, 0x03, 0xfd, 0x01, 0x59, 0x76, 0xd1, 0x75, 0x7f, 0x41, 0x97, 0x46, 0xd1, - 0x45, 0x96, 0x59, 0x09, 0xb1, 0xba, 0x2d, 0xba, 0x6e, 0xb3, 0x2a, 0x66, 0x38, 0x24, 0x45, 0xbd, - 0xa8, 0xd4, 0x40, 0x56, 0xdd, 0x89, 0xe7, 0xf1, 0x9d, 0x39, 0x67, 
0xce, 0x39, 0xf3, 0xd9, 0xe0, - 0xee, 0xfe, 0x75, 0xa6, 0x11, 0x47, 0xdf, 0xf7, 0x77, 0x31, 0xb5, 0xb1, 0x87, 0x99, 0xde, 0xc4, - 0x76, 0xc5, 0xa1, 0xba, 0x54, 0x98, 0x2e, 0xd1, 0xf7, 0xea, 0xce, 0x0b, 0xcb, 0xb1, 0x3d, 0xea, - 0xd4, 0xf5, 0xe6, 0xaa, 0x59, 0x77, 0x6b, 0xe6, 0xaa, 0x5e, 0xc5, 0x36, 0xa6, 0xa6, 0x87, 0x2b, - 0x9a, 0x4b, 0x1d, 0xcf, 0x81, 0xa5, 0xc0, 0x41, 0x33, 0x5d, 0xa2, 0x75, 0x38, 0x68, 0xa1, 0xc3, - 0xf2, 0xc5, 0x2a, 0xf1, 0x6a, 0xfe, 0xae, 0x66, 0x39, 0x0d, 0xbd, 0xea, 0x54, 0x1d, 0x5d, 0xf8, - 0xed, 0xfa, 0x7b, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x01, 0xde, 0xf2, 0x95, 0xf8, 0x00, 0x0d, 0xd3, - 0xaa, 0x11, 0x1b, 0xd3, 0x03, 0xdd, 0xdd, 0xaf, 0x72, 0x01, 0xd3, 0x1b, 0xd8, 0x33, 0xf5, 0x66, - 0xcf, 0x29, 0x96, 0xf5, 0x41, 0x5e, 0xd4, 0xb7, 0x3d, 0xd2, 0xc0, 0x3d, 0x0e, 0xd7, 0xd2, 0x1c, - 0x98, 0x55, 0xc3, 0x0d, 0xb3, 0xdb, 0x4f, 0x7d, 0x02, 0x96, 0x3e, 0xa9, 0x3b, 0x2f, 0x6e, 0x11, - 0xe6, 0x11, 0xbb, 0xea, 0x13, 0x56, 0xc3, 0x74, 0x13, 0x7b, 0x35, 0xa7, 0x02, 0x6f, 0x82, 0xac, - 0x77, 0xe0, 0xe2, 0x82, 0xb2, 0xa2, 0x9c, 0xcb, 0x1b, 0xe7, 0x0f, 0x5b, 0xa5, 0xb1, 0x76, 0xab, - 0x94, 0x7d, 0x78, 0xe0, 0xe2, 0x77, 0xad, 0xd2, 0xe9, 0x01, 0x6e, 0x5c, 0x8d, 0x84, 0xa3, 0xfa, - 0x2a, 0x03, 0x00, 0xb7, 0x2a, 0x8b, 0xd0, 0xf0, 0x19, 0x98, 0xe2, 0xe9, 0x56, 0x4c, 0xcf, 0x14, - 0x98, 0xd3, 0x97, 0x2f, 0x69, 0x71, 0xb1, 0xa3, 0x53, 0x6b, 0xee, 0x7e, 0x95, 0x0b, 0x98, 0xc6, - 0xad, 0xb5, 0xe6, 0xaa, 0xf6, 0xe9, 0xee, 0x73, 0x6c, 0x79, 0x9b, 0xd8, 0x33, 0x0d, 0x28, 0x4f, - 0x01, 0x62, 0x19, 0x8a, 0x50, 0xe1, 0x0e, 0xc8, 0x32, 0x17, 0x5b, 0x85, 0x8c, 0x40, 0xd7, 0xb5, - 0x94, 0xab, 0xd4, 0xe2, 0xc3, 0x95, 0x5d, 0x6c, 0x19, 0x33, 0x61, 0x8a, 0xfc, 0x0b, 0x09, 0x28, - 0xf8, 0x39, 0x98, 0x60, 0x9e, 0xe9, 0xf9, 0xac, 0x30, 0x2e, 0x40, 0x57, 0xdf, 0x07, 0x54, 0x38, - 0x1a, 0x73, 0x12, 0x76, 0x22, 0xf8, 0x46, 0x12, 0x50, 0x7d, 0x93, 0x01, 0x8b, 0xb1, 0xf1, 0xba, - 0x63, 0x57, 0x88, 0x47, 0x1c, 0x1b, 0xde, 0x48, 0xd4, 0xfd, 0x6c, 0x57, 0xdd, 0x97, 0xfa, 0xb8, - 0xc4, 0x35, 0x87, 0x1f, 0x45, 0xe7, 0xcd, 0x08, 0xf7, 0x33, 0xc9, 0xe0, 0xef, 0x5a, 0xa5, 0xf9, - 0xc8, 0x2d, 0x79, 0x1e, 0xd8, 0x04, 0xb0, 0x6e, 0x32, 0xef, 0x21, 0x35, 0x6d, 0x16, 0xc0, 0x92, - 0x06, 0x96, 0x69, 0xff, 0x77, 0xb4, 0x9b, 0xe2, 0x1e, 0xc6, 0xb2, 0x0c, 0x09, 0x37, 0x7a, 0xd0, - 0x50, 0x9f, 0x08, 0xf0, 0xdf, 0x60, 0x82, 0x62, 0x93, 0x39, 0x76, 0x21, 0x2b, 0x8e, 0x1c, 0xd5, - 0x0b, 0x09, 0x29, 0x92, 0x5a, 0xf8, 0x1f, 0x30, 0xd9, 0xc0, 0x8c, 0x99, 0x55, 0x5c, 0xc8, 0x09, - 0xc3, 0x79, 0x69, 0x38, 0xb9, 0x19, 0x88, 0x51, 0xa8, 0x57, 0x7f, 0x52, 0xc0, 0x5c, 0x5c, 0xa7, - 0x0d, 0xc2, 0x3c, 0xf8, 0xb4, 0xa7, 0xfb, 0xb4, 0xd1, 0x72, 0xe2, 0xde, 0xa2, 0xf7, 0x16, 0x64, - 0xb8, 0xa9, 0x50, 0xd2, 0xd1, 0x79, 0xdb, 0x20, 0x47, 0x3c, 0xdc, 0xe0, 0x55, 0x1f, 0x3f, 0x37, - 0x7d, 0xf9, 0xfc, 0x7b, 0x74, 0x89, 0x31, 0x2b, 0x71, 0x73, 0xf7, 0x38, 0x02, 0x0a, 0x80, 0xd4, - 0xdf, 0xc6, 0x3b, 0x53, 0xe0, 0x1d, 0x09, 0x7f, 0x50, 0xc0, 0xb2, 0x4b, 0x89, 0x43, 0x89, 0x77, - 0xb0, 0x81, 0x9b, 0xb8, 0xbe, 0xee, 0xd8, 0x7b, 0xa4, 0xea, 0x53, 0x93, 0xd7, 0x52, 0x66, 0x75, - 0x2b, 0x35, 0xf4, 0xf6, 0x40, 0x08, 0x84, 0xf7, 0x30, 0xc5, 0xb6, 0x85, 0x0d, 0x55, 0x9e, 0x69, - 0x79, 0x88, 0xf1, 0x90, 0xb3, 0xc0, 0xfb, 0x00, 0x36, 0x4c, 0x8f, 0xd7, 0xb4, 0xba, 0x4d, 0xb1, - 0x85, 0x2b, 0x1c, 0x55, 0xb4, 0x64, 0x2e, 0xee, 0x8f, 0xcd, 0x1e, 0x0b, 0xd4, 0xc7, 0x0b, 0x7e, - 0xab, 0x80, 0xc5, 0x4a, 0xef, 0xa2, 0x91, 0x9d, 0x79, 0x7d, 0xa4, 0x52, 0xf7, 0x59, 0x54, 0xc6, - 0x52, 0xbb, 0x55, 0x5a, 0xec, 0xa3, 0x40, 0xfd, 0xa2, 0xc1, 0x2f, 0x40, 0x8e, 0xfa, 0x75, 
0xcc, - 0x0a, 0x59, 0x71, 0xc3, 0xe9, 0x61, 0xb7, 0x9d, 0x3a, 0xb1, 0x0e, 0x10, 0xf7, 0xf9, 0x8c, 0x78, - 0xb5, 0xb2, 0x2f, 0x36, 0x16, 0x8b, 0xaf, 0x5b, 0xa8, 0x50, 0x80, 0xaa, 0xbe, 0x04, 0x0b, 0xdd, - 0x8b, 0x03, 0xd6, 0x00, 0xb0, 0xc2, 0x59, 0x65, 0x05, 0x45, 0xc4, 0xbd, 0xf2, 0x1e, 0x9d, 0x15, - 0x0d, 0x7a, 0xbc, 0x36, 0x23, 0x11, 0x43, 0x1d, 0xd8, 0xea, 0x25, 0x30, 0x73, 0x87, 0x3a, 0xbe, - 0x2b, 0x0f, 0x09, 0x57, 0x40, 0xd6, 0x36, 0x1b, 0xe1, 0x0a, 0x8a, 0xf6, 0xe2, 0x96, 0xd9, 0xc0, - 0x48, 0x68, 0xd4, 0xef, 0x15, 0x30, 0xbb, 0x41, 0x1a, 0xc4, 0x43, 0x98, 0xb9, 0x8e, 0xcd, 0x30, - 0xbc, 0x9a, 0x58, 0x5b, 0x67, 0xba, 0xd6, 0xd6, 0x89, 0x84, 0x71, 0xc7, 0xc2, 0x7a, 0x0a, 0x26, - 0xbf, 0xf4, 0xb1, 0x4f, 0xec, 0xaa, 0x5c, 0xdb, 0x57, 0x53, 0x33, 0xdc, 0x09, 0xec, 0x13, 0x1d, - 0x67, 0x4c, 0xf3, 0x45, 0x20, 0x35, 0x28, 0x84, 0x54, 0x7f, 0x57, 0xc0, 0x19, 0x11, 0x19, 0x57, - 0x06, 0x77, 0x32, 0x7c, 0x0a, 0x0a, 0x26, 0x63, 0x3e, 0xc5, 0x95, 0x75, 0xc7, 0xb6, 0x7c, 0xca, - 0x67, 0xe0, 0xa0, 0x5c, 0x33, 0x29, 0x66, 0x22, 0x9d, 0x9c, 0xb1, 0x22, 0xd3, 0x29, 0xac, 0x0d, - 0xb0, 0x43, 0x03, 0x11, 0xe0, 0x3e, 0x98, 0xad, 0x77, 0x26, 0x2f, 0xf3, 0xd4, 0x52, 0xf3, 0x4c, - 0x94, 0xcc, 0x38, 0x25, 0x8f, 0x90, 0x2c, 0x3b, 0x4a, 0x62, 0xab, 0x2f, 0xc0, 0xa9, 0x2d, 0x3e, - 0xc8, 0xcc, 0xf1, 0xa9, 0x85, 0xe3, 0x1e, 0x84, 0x25, 0x90, 0x6b, 0x62, 0xba, 0x1b, 0xf4, 0x51, - 0xde, 0xc8, 0xf3, 0x0e, 0x7c, 0xcc, 0x05, 0x28, 0x90, 0xc3, 0x8f, 0xc1, 0xbc, 0x1d, 0x7b, 0x3e, - 0x42, 0x1b, 0xac, 0x30, 0x21, 0x4c, 0x17, 0xdb, 0xad, 0xd2, 0xfc, 0x56, 0x52, 0x85, 0xba, 0x6d, - 0xd5, 0xa3, 0x0c, 0x58, 0x1a, 0xd0, 0xf2, 0xf0, 0x31, 0x98, 0x62, 0xf2, 0xb7, 0x6c, 0xe3, 0x73, - 0xa9, 0xc9, 0x4b, 0xe7, 0x78, 0xeb, 0x86, 0x68, 0x28, 0xc2, 0x82, 0x2e, 0x98, 0xa5, 0xf2, 0x0c, - 0x22, 0xa8, 0xdc, 0xbe, 0xff, 0x4b, 0x05, 0xef, 0xad, 0x4f, 0x5c, 0x5e, 0xd4, 0x89, 0x88, 0x92, - 0x01, 0xe0, 0x4b, 0xb0, 0xd0, 0x91, 0x78, 0x10, 0x74, 0x5c, 0x04, 0xbd, 0x96, 0x1a, 0xb4, 0xef, - 0xbd, 0x18, 0x05, 0x19, 0x77, 0x61, 0xab, 0x0b, 0x17, 0xf5, 0x44, 0x52, 0x7f, 0xc9, 0x80, 0x21, - 0x0b, 0xf9, 0x03, 0x10, 0x2c, 0x33, 0x41, 0xb0, 0x6e, 0x1e, 0xe3, 0xa9, 0x19, 0x48, 0xb8, 0x48, - 0x17, 0xe1, 0x5a, 0x3b, 0x4e, 0x90, 0xe1, 0x04, 0xec, 0x8f, 0x0c, 0xf8, 0xd7, 0x60, 0xe7, 0x98, - 0x90, 0x3d, 0x48, 0x6c, 0xb6, 0xff, 0x77, 0x6d, 0xb6, 0xb3, 0x23, 0x40, 0xfc, 0x43, 0xd0, 0xba, - 0x08, 0xda, 0x5b, 0x05, 0x14, 0x07, 0xd7, 0xed, 0x03, 0x10, 0xb6, 0x67, 0x49, 0xc2, 0x76, 0xe3, - 0x18, 0x5d, 0x36, 0x80, 0xc0, 0xdd, 0x19, 0xd6, 0x5c, 0x11, 0xd3, 0x1a, 0xe1, 0xa9, 0x3d, 0x1c, - 0x5a, 0x2b, 0xc1, 0x0c, 0x53, 0xfe, 0x64, 0x48, 0x78, 0xdf, 0xb6, 0xcd, 0xdd, 0x3a, 0x6e, 0x60, - 0xdb, 0x93, 0x1d, 0x49, 0xc0, 0x64, 0x3d, 0x78, 0x22, 0xe5, 0x5c, 0x1b, 0xa3, 0xbd, 0x4c, 0xc3, - 0x9e, 0xd4, 0xe0, 0x39, 0x96, 0x66, 0x28, 0xc4, 0x57, 0x5f, 0x29, 0x60, 0x25, 0x6d, 0x5c, 0xe1, - 0x57, 0x7d, 0x68, 0xcf, 0x71, 0x58, 0xed, 0xe8, 0x34, 0xe8, 0x47, 0x05, 0x9c, 0xec, 0x47, 0x2e, - 0xf8, 0x04, 0x70, 0x46, 0x11, 0xd1, 0x81, 0x68, 0x02, 0x76, 0x84, 0x14, 0x49, 0x2d, 0xbc, 0x00, - 0xa6, 0x6a, 0xa6, 0x5d, 0x29, 0x93, 0xaf, 0x43, 0xb2, 0x1b, 0xf5, 0xe0, 0x5d, 0x29, 0x47, 0x91, - 0x05, 0xbc, 0x05, 0x16, 0x84, 0xdf, 0x06, 0xb6, 0xab, 0x5e, 0x4d, 0x14, 0x4b, 0x4c, 0x73, 0x2e, - 0x7e, 0x14, 0x76, 0xba, 0xf4, 0xa8, 0xc7, 0x43, 0xfd, 0x53, 0x01, 0xf0, 0xef, 0xbc, 0xf7, 0xe7, - 0x41, 0xde, 0x74, 0x89, 0xa0, 0x7d, 0xc1, 0x14, 0xe4, 0x8d, 0xd9, 0x76, 0xab, 0x94, 0x5f, 0xdb, - 0xbe, 0x17, 0x08, 0x51, 0xac, 0xe7, 0xc6, 0xe1, 0x43, 0x18, 0x3c, 0x78, 0xd2, 0x38, 0x0c, 0xcc, - 0x50, 0xac, 0x87, 
0xd7, 0xc1, 0x8c, 0x55, 0xf7, 0x99, 0x87, 0x69, 0xd9, 0x72, 0x5c, 0x2c, 0xb6, - 0xc6, 0x94, 0x71, 0x52, 0xe6, 0x34, 0xb3, 0xde, 0xa1, 0x43, 0x09, 0x4b, 0xa8, 0x01, 0xc0, 0x5b, - 0x9e, 0xb9, 0x26, 0x8f, 0x93, 0x13, 0x71, 0xe6, 0xf8, 0x85, 0x6d, 0x45, 0x52, 0xd4, 0x61, 0xa1, - 0x3e, 0x07, 0xa7, 0xca, 0x98, 0x36, 0x89, 0x85, 0xd7, 0x2c, 0xcb, 0xf1, 0x6d, 0x2f, 0x24, 0xb0, - 0x3a, 0xc8, 0x47, 0x66, 0x72, 0x2a, 0x4e, 0xc8, 0xf8, 0xf9, 0x08, 0x0b, 0xc5, 0x36, 0xd1, 0x18, - 0x66, 0x06, 0x8e, 0xe1, 0xcf, 0x19, 0x30, 0x19, 0xc3, 0x67, 0xf7, 0x89, 0x5d, 0x91, 0xc8, 0xa7, - 0x43, 0xeb, 0x07, 0xc4, 0xae, 0xbc, 0x6b, 0x95, 0xa6, 0xa5, 0x19, 0xff, 0x44, 0xc2, 0x10, 0xde, - 0x07, 0x59, 0x9f, 0x61, 0x2a, 0x07, 0xec, 0x42, 0x6a, 0x37, 0x3f, 0x62, 0x98, 0x86, 0x0c, 0x68, - 0x8a, 0x43, 0x73, 0x01, 0x12, 0x18, 0x70, 0x0b, 0xe4, 0xaa, 0xfc, 0x56, 0xe4, 0xe6, 0xbf, 0x98, - 0x0a, 0xd6, 0x49, 0xed, 0x83, 0x46, 0x10, 0x12, 0x14, 0xc0, 0x40, 0x0a, 0xe6, 0x58, 0xa2, 0x88, - 0xe2, 0xc2, 0x46, 0x61, 0x34, 0x7d, 0x6b, 0x6f, 0xc0, 0x76, 0xab, 0x34, 0x97, 0x54, 0xa1, 0xae, - 0x08, 0xaa, 0x0e, 0xa6, 0x3b, 0x52, 0x4c, 0x5f, 0x82, 0xc6, 0xed, 0xc3, 0xa3, 0xe2, 0xd8, 0xeb, - 0xa3, 0xe2, 0xd8, 0x9b, 0xa3, 0xe2, 0xd8, 0x37, 0xed, 0xa2, 0x72, 0xd8, 0x2e, 0x2a, 0xaf, 0xdb, - 0x45, 0xe5, 0x4d, 0xbb, 0xa8, 0xbc, 0x6d, 0x17, 0x95, 0xef, 0x7e, 0x2d, 0x8e, 0x3d, 0x29, 0xa5, - 0xfc, 0xb7, 0xef, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x48, 0x84, 0x5f, 0xef, 0x28, 0x14, 0x00, - 0x00, + // 1621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46, + 0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8, + 0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7, + 0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e, + 0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7, + 0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87, + 0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb, + 0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17, + 0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b, + 0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 0xc2, + 0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e, + 0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda, + 0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5, + 0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e, + 0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee, + 0x71, 0xb8, 0x9c, 0xe6, 0xc0, 0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe, + 0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba, + 0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c, + 0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95, + 0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1, + 0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 
0x02, 0x70, + 0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7, + 0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 0x02, + 0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e, + 0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa, + 0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae, + 0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60, + 0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50, + 0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f, + 0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f, + 0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f, + 0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47, + 0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb, + 0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a, + 0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87, + 0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb, + 0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72, + 0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21, + 0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51, + 0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37, + 0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8, + 0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f, + 0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46, + 0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c, + 0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93, + 0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0, + 0x0b, 0x05, 0xcc, 0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2, + 0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63, + 0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89, + 0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae, + 0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2, + 0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb, + 0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a, + 0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18, + 0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0, + 0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58, + 0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c, + 0x2b, 0x03, 
0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79, + 0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f, + 0x2d, 0xf5, 0x9e, 0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a, + 0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88, + 0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53, + 0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b, + 0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b, + 0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd, + 0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5, + 0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5, + 0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22, + 0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0, + 0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06, + 0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00, + 0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7, + 0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18, + 0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc, + 0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf, + 0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d, + 0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72, + 0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43, + 0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60, + 0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05, + 0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14, + 0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5, + 0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78, + 0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f, + 0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad, + 0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2, + 0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69, + 0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 0x8a, 0x59, 0x42, + 0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94, + 0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67, + 0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50, + 0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac, + 0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27, + 0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 
0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3, + 0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22, + 0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82, + 0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd, + 0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57, + 0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55, + 0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9, + 0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e, + 0x4e, 0xdd, 0x15, 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1155,6 +1224,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1478,6 +1557,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1770,6 +1861,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1904,6 +2010,12 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2029,6 +2141,10 @@ func (m 
*PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2146,6 +2262,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2259,6 +2386,8 @@ func (this *LimitedPriorityLevelConfiguration) String() string { s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2360,6 +2489,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2447,6 +2577,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3543,6 +3763,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4486,6 +4746,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 347c9d3fa..6509386f2 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/flowcontrol/v1alpha1"; +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. 
+ // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} + // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { // `type` is the type of flow distinguisher method @@ -153,8 +187,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -176,6 +210,35 @@ message LimitedPriorityLevelConfiguration { // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
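The three formulas spelled out in the comments above (NominalCL, LendableCL and BorrowingCL) fully determine the borrowing arithmetic, so a short Go sketch can make them concrete. Everything below is illustrative rather than code from these vendored files: the helper names, the int32 plumbing and the use of math.MaxInt32 to stand in for the "effectively infinite" nil borrowingLimitPercent case are assumptions of the sketch; only the formulas themselves come from the field comments.

package main

import (
	"fmt"
	"math"
)

// nominalCL implements NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ).
func nominalCL(serverCL, ncs, sumNCS int32) int32 {
	return int32(math.Ceil(float64(serverCL) * float64(ncs) / float64(sumNCS)))
}

// lendableCL implements LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).
func lendableCL(nominal, lendablePercent int32) int32 {
	return int32(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

// borrowingCL implements BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ).
// A nil borrowingLimitPercent means the limit is effectively infinite; returning
// math.MaxInt32 is an illustrative stand-in for that, not behaviour taken from the patch.
func borrowingCL(nominal int32, borrowingLimitPercent *int32) int32 {
	if borrowingLimitPercent == nil {
		return math.MaxInt32
	}
	return int32(math.Round(float64(nominal) * float64(*borrowingLimitPercent) / 100.0))
}

func main() {
	// Hypothetical numbers: ServerCL=600 shared by levels whose NCS values sum to 130,
	// with this level holding NCS=30, lendablePercent=25 and borrowingLimitPercent=50.
	limit := int32(50)
	nominal := nominalCL(600, 30, 130)                        // ceil(600*30/130) = 139
	fmt.Println("NominalCL:", nominal)                        // 139
	fmt.Println("LendableCL:", lendableCL(nominal, 25))       // round(139*25/100) = 35
	fmt.Println("BorrowingCL:", borrowingCL(nominal, &limit)) // round(139*50/100) = 70
}

With those hypothetical inputs the level is nominally entitled to 139 seats, may lend out up to 35 of them, and may itself borrow at most 70 seats from other levels.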
+ // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -303,6 +366,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index 5af677e2f..161411ff3 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -63,7 +63,7 @@ const ( // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -87,7 +87,7 @@ type FlowSchema struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -344,7 +344,7 @@ type FlowSchemaConditionType string // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -367,7 +367,7 @@ type PriorityLevelConfiguration struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -399,6 +399,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. 
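Since the new `exempt` stanza added to PriorityLevelConfigurationSpec above may only be populated when `type` is `"Exempt"`, a small client-side sketch can show how the pair fits together. This is illustrative only: it assumes the vendored k8s.io/api module exposes a `PriorityLevelEnablementExempt` constant alongside the types shown in this hunk, and the int32Ptr helper is local to the sketch.

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1alpha1"
)

// int32Ptr is a local convenience helper for the sketch.
func int32Ptr(v int32) *int32 { return &v }

func main() {
	// An exempt priority level that spells out the defaults explicitly:
	// zero nominal shares and nothing lendable, per the field docs above.
	spec := flowcontrol.PriorityLevelConfigurationSpec{
		Type: flowcontrol.PriorityLevelEnablementExempt,
		Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{
			NominalConcurrencyShares: int32Ptr(0),
			LendablePercent:          int32Ptr(0),
		},
	}
	// Combining spec.Exempt with Type "Limited" would violate the
	// "MUST be empty if `type` is `\"Limited\"`" rule stated in the comments.
	fmt.Printf("%+v\n", spec)
}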
// +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -415,8 +423,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -438,6 +446,72 @@ type LimitedPriorityLevelConfiguration struct { // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. 
+ // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index 1827be02d..1d0680c10 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. 
This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -111,9 +121,11 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. 
A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go index 7f73f4606..a5c9737aa 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,32 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go index 226014160..2b6a3d3fd 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -40,7 +40,7 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -64,7 +64,7 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -88,7 +88,7 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -112,7 +112,7 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go index e1d17b5b8..33f4b97e3 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{0} + return fileDescriptor_80171c2a4e3669de, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{1} + return 
fileDescriptor_80171c2a4e3669de, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{2} + return fileDescriptor_80171c2a4e3669de, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{3} + return fileDescriptor_80171c2a4e3669de, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{4} + return fileDescriptor_80171c2a4e3669de, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{5} + return fileDescriptor_80171c2a4e3669de, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{6} + return fileDescriptor_80171c2a4e3669de, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{7} + return fileDescriptor_80171c2a4e3669de, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{8} + return fileDescriptor_80171c2a4e3669de, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{9} + return fileDescriptor_80171c2a4e3669de, []int{10} } func (m 
*NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{10} + return fileDescriptor_80171c2a4e3669de, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{11} + return fileDescriptor_80171c2a4e3669de, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{12} + return fileDescriptor_80171c2a4e3669de, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{13} + return fileDescriptor_80171c2a4e3669de, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{14} + return fileDescriptor_80171c2a4e3669de, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{15} + return fileDescriptor_80171c2a4e3669de, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{16} + return 
fileDescriptor_80171c2a4e3669de, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{17} + return fileDescriptor_80171c2a4e3669de, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{18} + return fileDescriptor_80171c2a4e3669de, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{19} + return fileDescriptor_80171c2a4e3669de, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{20} + return fileDescriptor_80171c2a4e3669de, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{21} + return fileDescriptor_80171c2a4e3669de, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration") proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod") proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema") proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition") @@ -689,101 +718,141 @@ func init() { } var fileDescriptor_80171c2a4e3669de = []byte{ - // 1496 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x73, 0xdb, 0x44, - 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, - 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, - 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, - 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, - 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 
0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, - 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x25, 0x70, 0x75, 0xff, 0x32, 0xd5, 0x2c, - 0xa7, 0xbc, 0xef, 0x57, 0x88, 0x67, 0x13, 0x46, 0x68, 0xb9, 0x45, 0xec, 0xaa, 0xe3, 0x95, 0xa5, - 0xc2, 0x70, 0xad, 0xf2, 0x5e, 0xc3, 0x39, 0x30, 0x1d, 0x9b, 0x79, 0x4e, 0xa3, 0xdc, 0x3a, 0x57, - 0x21, 0xcc, 0x38, 0x57, 0xae, 0x11, 0x9b, 0x78, 0x06, 0x23, 0x55, 0xcd, 0xf5, 0x1c, 0xe6, 0xa0, - 0x62, 0x60, 0xaf, 0x19, 0xae, 0xa5, 0x75, 0xd9, 0x6b, 0xd2, 0x7e, 0xf5, 0x4c, 0xcd, 0x62, 0x75, - 0xbf, 0xa2, 0x99, 0x4e, 0xb3, 0x5c, 0x73, 0x6a, 0x4e, 0x59, 0xb8, 0x55, 0xfc, 0x3d, 0xf1, 0x25, - 0x3e, 0xc4, 0xaf, 0x00, 0x6e, 0xf5, 0x42, 0x1c, 0xbe, 0x69, 0x98, 0x75, 0xcb, 0x26, 0xde, 0x61, - 0xd9, 0xdd, 0xaf, 0x71, 0x01, 0x2d, 0x37, 0x09, 0x33, 0xca, 0xad, 0xbe, 0x24, 0x56, 0xcb, 0xc3, - 0xbc, 0x3c, 0xdf, 0x66, 0x56, 0x93, 0xf4, 0x39, 0xbc, 0x95, 0xe6, 0x40, 0xcd, 0x3a, 0x69, 0x1a, - 0xbd, 0x7e, 0xea, 0x1d, 0x58, 0xf9, 0xb8, 0xe1, 0x1c, 0x5c, 0xb1, 0x28, 0xb3, 0xec, 0x9a, 0x6f, - 0xd1, 0x3a, 0xf1, 0xb6, 0x08, 0xab, 0x3b, 0x55, 0xf4, 0x01, 0x64, 0xd9, 0xa1, 0x4b, 0x0a, 0xca, - 0x9a, 0xf2, 0x5a, 0x5e, 0x3f, 0xf5, 0xa8, 0x5d, 0x9a, 0xe8, 0xb4, 0x4b, 0xd9, 0x5b, 0x87, 0x2e, - 0x79, 0xd6, 0x2e, 0x1d, 0x1f, 0xe2, 0xc6, 0xd5, 0x58, 0x38, 0xaa, 0xdf, 0x67, 0x00, 0xb8, 0xd5, - 0xae, 0x08, 0x8d, 0xee, 0xc1, 0x0c, 0x2f, 0xb7, 0x6a, 0x30, 0x43, 0x60, 0xce, 0x9e, 0x3f, 0xab, - 0xc5, 0xbd, 0x8e, 0xb2, 0xd6, 0xdc, 0xfd, 0x1a, 0x17, 0x50, 0x8d, 0x5b, 0x6b, 0xad, 0x73, 0xda, - 0x8d, 0xca, 0x7d, 0x62, 0xb2, 0x2d, 0xc2, 0x0c, 0x1d, 0xc9, 0x2c, 0x20, 0x96, 0xe1, 0x08, 0x15, - 0xed, 0x40, 0x96, 0xba, 0xc4, 0x2c, 0x64, 0x04, 0xba, 0xa6, 0x8d, 0x3e, 0x49, 0x2d, 0xce, 0x6d, - 0xd7, 0x25, 0xa6, 0x3e, 0x17, 0x56, 0xc8, 0xbf, 0xb0, 0x40, 0x42, 0x5f, 0xc0, 0x14, 0x65, 0x06, - 0xf3, 0x69, 0x61, 0xb2, 0x2f, 0xe3, 0x34, 0x4c, 0xe1, 0xa7, 0x2f, 0x48, 0xd4, 0xa9, 0xe0, 0x1b, - 0x4b, 0x3c, 0xf5, 0x49, 0x06, 0x96, 0x63, 0xe3, 0x0d, 0xc7, 0xae, 0x5a, 0xcc, 0x72, 0x6c, 0xf4, - 0x6e, 0xa2, 0xeb, 0x27, 0x7b, 0xba, 0xbe, 0x32, 0xc0, 0x25, 0xee, 0x38, 0x7a, 0x3b, 0x4a, 0x37, - 0x23, 0xdc, 0x4f, 0x24, 0x83, 0x3f, 0x6b, 0x97, 0x16, 0x23, 0xb7, 0x64, 0x3e, 0xa8, 0x05, 0xa8, - 0x61, 0x50, 0x76, 0xcb, 0x33, 0x6c, 0x1a, 0xc0, 0x5a, 0x4d, 0x22, 0xab, 0x7e, 0x63, 0xbc, 0x73, - 0xe2, 0x1e, 0xfa, 0xaa, 0x0c, 0x89, 0x36, 0xfb, 0xd0, 0xf0, 0x80, 0x08, 0xe8, 0x55, 0x98, 0xf2, - 0x88, 0x41, 0x1d, 0xbb, 0x90, 0x15, 0x29, 0x47, 0xfd, 0xc2, 0x42, 0x8a, 0xa5, 0x16, 0xbd, 0x0e, - 0xd3, 0x4d, 0x42, 0xa9, 0x51, 0x23, 0x85, 0x9c, 0x30, 0x5c, 0x94, 0x86, 0xd3, 0x5b, 0x81, 0x18, - 0x87, 0x7a, 0xf5, 0x17, 0x05, 0x16, 0xe2, 0x3e, 0x6d, 0x5a, 0x94, 0xa1, 0xbb, 0x7d, 0xdc, 0xd3, - 0xc6, 0xab, 0x89, 0x7b, 0x0b, 0xe6, 0x2d, 0xc9, 0x70, 0x33, 0xa1, 0xa4, 0x8b, 0x77, 0x37, 0x20, - 0x67, 0x31, 0xd2, 0xe4, 0x5d, 0x9f, 0xec, 0x69, 0x57, 0x0a, 0x49, 0xf4, 0x79, 0x09, 0x9b, 0xbb, - 0xc6, 0x01, 0x70, 0x80, 0xa3, 0xfe, 0x35, 0xd9, 0x5d, 0x01, 0xe7, 0x23, 0xfa, 0x49, 0x81, 0x55, - 0xd7, 0xb3, 0x1c, 0xcf, 0x62, 0x87, 0x9b, 0xa4, 0x45, 0x1a, 0x1b, 0x8e, 0xbd, 0x67, 0xd5, 0x7c, - 0xcf, 0xe0, 0xad, 0x94, 0x45, 0x6d, 0xa4, 0x45, 0xde, 0x19, 0x8a, 0x80, 0xc9, 0x1e, 0xf1, 0x88, - 0x6d, 0x12, 0x5d, 0x95, 0x29, 0xad, 0x8e, 0x30, 0x1e, 0x91, 0x0a, 0xfa, 0x14, 0x50, 0xd3, 0x60, - 0xbc, 0xa3, 0xb5, 0x1d, 0x8f, 0x98, 0xa4, 0xca, 0x51, 0x05, 0x21, 0x73, 0x31, 0x3b, 0xb6, 0xfa, - 0x2c, 0xf0, 0x00, 0x2f, 0xf4, 0xad, 0x02, 0xcb, 0xd5, 0xfe, 0x21, 0x23, 0x79, 0x79, 0x69, 0x9c, - 0x46, 0x0f, 0x98, 0x51, 0xfa, 0x4a, 0xa7, 0x5d, 0x5a, 0x1e, 0xa0, 0xc0, 0x83, 0x82, 
0xa1, 0xbb, - 0x90, 0xf3, 0xfc, 0x06, 0xa1, 0x85, 0xac, 0x38, 0xde, 0xd4, 0xa8, 0x3b, 0x4e, 0xc3, 0x32, 0x0f, - 0x31, 0x77, 0xf9, 0xdc, 0x62, 0xf5, 0x5d, 0x5f, 0xcc, 0x2a, 0x1a, 0x9f, 0xb5, 0x50, 0xe1, 0x00, - 0x54, 0x7d, 0x08, 0x4b, 0xbd, 0x43, 0x03, 0xd5, 0x00, 0xcc, 0xf0, 0x9e, 0xd2, 0x82, 0x22, 0xc2, - 0xbe, 0x39, 0x3e, 0xab, 0xa2, 0x3b, 0x1e, 0xcf, 0xcb, 0x48, 0x44, 0x71, 0x17, 0xb4, 0x7a, 0x16, - 0xe6, 0xae, 0x7a, 0x8e, 0xef, 0xca, 0x1c, 0xd1, 0x1a, 0x64, 0x6d, 0xa3, 0x19, 0x4e, 0x9f, 0x68, - 0x22, 0x6e, 0x1b, 0x4d, 0x82, 0x85, 0x46, 0xfd, 0x51, 0x81, 0xf9, 0x4d, 0xab, 0x69, 0x31, 0x4c, - 0xa8, 0xeb, 0xd8, 0x94, 0xa0, 0x8b, 0x89, 0x89, 0x75, 0xa2, 0x67, 0x62, 0x1d, 0x49, 0x18, 0x77, - 0xcd, 0xaa, 0x2f, 0x61, 0xfa, 0x81, 0x4f, 0x7c, 0xcb, 0xae, 0xc9, 0x79, 0x7d, 0x21, 0xad, 0xc0, - 0x9b, 0x81, 0x79, 0x82, 0x6d, 0xfa, 0x2c, 0x1f, 0x01, 0x52, 0x83, 0x43, 0x44, 0xf5, 0x6f, 0x05, - 0x4e, 0x88, 0xc0, 0xa4, 0x3a, 0x9c, 0xc5, 0xe8, 0x2e, 0x14, 0x0c, 0x4a, 0x7d, 0x8f, 0x54, 0x37, - 0x1c, 0xdb, 0xf4, 0x3d, 0xce, 0xff, 0xc3, 0xdd, 0xba, 0xe1, 0x11, 0x2a, 0xaa, 0xc9, 0xe9, 0x6b, - 0xb2, 0x9a, 0xc2, 0xfa, 0x10, 0x3b, 0x3c, 0x14, 0x01, 0xdd, 0x87, 0xf9, 0x46, 0x77, 0xed, 0xb2, - 0xcc, 0x33, 0x69, 0x65, 0x26, 0x1a, 0xa6, 0x1f, 0x93, 0x19, 0x24, 0x9b, 0x8e, 0x93, 0xd0, 0xea, - 0x01, 0x1c, 0xdb, 0xe6, 0x77, 0x98, 0x3a, 0xbe, 0x67, 0x92, 0x98, 0x80, 0xa8, 0x04, 0xb9, 0x16, - 0xf1, 0x2a, 0x01, 0x89, 0xf2, 0x7a, 0x9e, 0xd3, 0xef, 0x33, 0x2e, 0xc0, 0x81, 0x1c, 0xbd, 0x07, - 0x8b, 0x76, 0xec, 0x79, 0x1b, 0x6f, 0xd2, 0xc2, 0x94, 0x30, 0x5d, 0xee, 0xb4, 0x4b, 0x8b, 0xdb, - 0x49, 0x15, 0xee, 0xb5, 0x55, 0xdb, 0x19, 0x58, 0x19, 0xc2, 0x77, 0x74, 0x1b, 0x66, 0xa8, 0xfc, - 0x2d, 0x39, 0x7c, 0x32, 0xad, 0x76, 0xe9, 0x1b, 0x4f, 0xdb, 0x10, 0x0c, 0x47, 0x50, 0xc8, 0x81, - 0x79, 0x4f, 0xa6, 0x20, 0x62, 0xca, 0xa9, 0x7b, 0x3e, 0x0d, 0xbb, 0xbf, 0x3b, 0x71, 0x73, 0x71, - 0x37, 0x20, 0x4e, 0xe2, 0xa3, 0x87, 0xb0, 0xd4, 0x55, 0x76, 0x10, 0x73, 0x52, 0xc4, 0xbc, 0x98, - 0x16, 0x73, 0xe0, 0xa1, 0xe8, 0x05, 0x19, 0x76, 0x69, 0xbb, 0x07, 0x16, 0xf7, 0x05, 0x52, 0x7f, - 0xcb, 0xc0, 0x88, 0x41, 0xfc, 0x12, 0x96, 0xaa, 0x7b, 0x89, 0xa5, 0xea, 0xfd, 0xe7, 0x7f, 0x61, - 0x86, 0x2e, 0x59, 0xf5, 0x9e, 0x25, 0xeb, 0xc3, 0x17, 0x88, 0x31, 0x7a, 0xe9, 0xfa, 0x27, 0x03, - 0xaf, 0x0c, 0x77, 0x8e, 0x97, 0xb0, 0xeb, 0x89, 0x91, 0x76, 0xa9, 0x67, 0xa4, 0x9d, 0x1c, 0x03, - 0xe2, 0xff, 0xa5, 0xac, 0x67, 0x29, 0xfb, 0x5d, 0x81, 0xe2, 0xf0, 0xbe, 0xbd, 0x84, 0x25, 0xed, - 0xab, 0xe4, 0x92, 0xf6, 0xce, 0xf3, 0x93, 0x6c, 0xc8, 0xd2, 0x76, 0x75, 0x14, 0xb7, 0xa2, 0xf5, - 0x6a, 0x8c, 0x27, 0xf6, 0xd7, 0x91, 0xad, 0x12, 0xdb, 0x60, 0xca, 0x5f, 0x09, 0x09, 0xef, 0x8f, - 0x6c, 0xa3, 0xd2, 0x20, 0x4d, 0x62, 0x33, 0x49, 0xc8, 0x3a, 0x4c, 0x37, 0x82, 0xb7, 0x51, 0x5e, - 0xea, 0xf5, 0xb1, 0x9e, 0xa4, 0x51, 0x4f, 0x69, 0xf0, 0x0c, 0x4b, 0x33, 0x1c, 0xc2, 0xab, 0x3f, - 0x28, 0xb0, 0x96, 0x76, 0x59, 0xd1, 0xc1, 0x80, 0x65, 0xe7, 0x05, 0x16, 0xd9, 0xf1, 0x97, 0x9f, - 0x9f, 0x15, 0x38, 0x3a, 0x68, 0xa7, 0xe0, 0xf4, 0xe7, 0x8b, 0x44, 0xb4, 0x05, 0x44, 0xf4, 0xbf, - 0x29, 0xa4, 0x58, 0x6a, 0xd1, 0x69, 0x98, 0xa9, 0x1b, 0x76, 0x75, 0xd7, 0xfa, 0x3a, 0xdc, 0x6f, - 0x23, 0x02, 0x7e, 0x22, 0xe5, 0x38, 0xb2, 0x40, 0x57, 0x60, 0x49, 0xf8, 0x6d, 0x12, 0xbb, 0xc6, - 0xea, 0xa2, 0x57, 0xe2, 0x2a, 0xe7, 0xe2, 0xf7, 0xe0, 0x66, 0x8f, 0x1e, 0xf7, 0x79, 0xa8, 0xff, - 0x2a, 0x80, 0x9e, 0xe7, 0x9d, 0x3f, 0x05, 0x79, 0xc3, 0xb5, 0xc4, 0xb2, 0x17, 0x5c, 0x81, 0xbc, - 0x3e, 0xdf, 0x69, 0x97, 0xf2, 0xeb, 0x3b, 0xd7, 0x02, 0x21, 0x8e, 0xf5, 0xdc, 0x38, 0x7c, 0x02, - 0x83, 0xa7, 
0x4e, 0x1a, 0x87, 0x81, 0x29, 0x8e, 0xf5, 0xe8, 0x32, 0xcc, 0x99, 0x0d, 0x9f, 0x32, - 0xe2, 0xed, 0x9a, 0x8e, 0x4b, 0xc4, 0xc8, 0x98, 0xd1, 0x8f, 0xca, 0x9a, 0xe6, 0x36, 0xba, 0x74, - 0x38, 0x61, 0x89, 0x34, 0x00, 0x4e, 0x78, 0xea, 0x1a, 0x3c, 0x4e, 0x4e, 0xc4, 0x59, 0xe0, 0x07, - 0xb6, 0x1d, 0x49, 0x71, 0x97, 0x85, 0x7a, 0x1f, 0x8e, 0xed, 0x12, 0xaf, 0x65, 0x99, 0x64, 0xdd, - 0x34, 0x1d, 0xdf, 0x66, 0xe1, 0xda, 0x5a, 0x86, 0x7c, 0x64, 0x26, 0xef, 0xc4, 0x11, 0x19, 0x3f, - 0x1f, 0x61, 0xe1, 0xd8, 0x26, 0xba, 0x84, 0x99, 0xe1, 0x97, 0x30, 0x03, 0xd3, 0x31, 0x7c, 0x76, - 0xdf, 0xb2, 0xab, 0x12, 0xf9, 0x78, 0x68, 0x7d, 0xdd, 0xb2, 0xab, 0xcf, 0xda, 0xa5, 0x59, 0x69, - 0xc6, 0x3f, 0xb1, 0x30, 0x44, 0xd7, 0x20, 0xeb, 0x53, 0xe2, 0xc9, 0xeb, 0x75, 0x2a, 0x8d, 0xcc, - 0xb7, 0x29, 0xf1, 0xc2, 0xcd, 0x67, 0x86, 0x23, 0x73, 0x01, 0x16, 0x10, 0x68, 0x0b, 0x72, 0x35, - 0x7e, 0x28, 0x72, 0xea, 0x9f, 0x4e, 0xc3, 0xea, 0x5e, 0xe7, 0x03, 0x1a, 0x08, 0x09, 0x0e, 0x50, - 0xd0, 0x03, 0x58, 0xa0, 0x89, 0x16, 0x8a, 0xe3, 0x1a, 0x63, 0x93, 0x19, 0xd8, 0x78, 0x1d, 0x75, - 0xda, 0xa5, 0x85, 0xa4, 0x0a, 0xf7, 0x04, 0x50, 0xcb, 0x30, 0xdb, 0x55, 0x60, 0xfa, 0xfc, 0xd3, - 0xaf, 0x3c, 0x7a, 0x5a, 0x9c, 0x78, 0xfc, 0xb4, 0x38, 0xf1, 0xe4, 0x69, 0x71, 0xe2, 0x9b, 0x4e, - 0x51, 0x79, 0xd4, 0x29, 0x2a, 0x8f, 0x3b, 0x45, 0xe5, 0x49, 0xa7, 0xa8, 0xfc, 0xd1, 0x29, 0x2a, - 0xdf, 0xfd, 0x59, 0x9c, 0xb8, 0x53, 0x1c, 0xfd, 0x7f, 0xbd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x27, 0x1b, 0xb7, 0xd8, 0x11, 0x14, 0x00, 0x00, + // 1614 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x73, 0xdb, 0xc4, + 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x5f, 0x7e, 0x76, 0xd3, 0x4c, 0xfc, 0x4d, 0xbf, 0x63, 0xa7, 0x62, + 0x86, 0x02, 0x6d, 0xe5, 0xb6, 0xb4, 0xb4, 0xc0, 0xf0, 0x23, 0x4a, 0x4b, 0x29, 0x4d, 0xd2, 0x74, + 0xd3, 0x42, 0xa7, 0x74, 0x86, 0xca, 0xf2, 0xc6, 0x56, 0x63, 0x4b, 0xea, 0xae, 0xe4, 0x10, 0x7a, + 0x61, 0xf8, 0x0b, 0x38, 0xc3, 0x91, 0x03, 0x27, 0x2e, 0x5c, 0x39, 0x70, 0xa4, 0xc3, 0xa9, 0xc7, + 0x9e, 0x0c, 0x35, 0x27, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xae, 0xd6, 0x92, 0xe5, 0x5f, 0xf2, + 0xb4, 0x33, 0x3d, 0x71, 0x8b, 0xde, 0xfb, 0xbc, 0xcf, 0xdb, 0x7d, 0xfb, 0x7e, 0x39, 0x70, 0x79, + 0xef, 0x02, 0xd3, 0x2c, 0xa7, 0xb8, 0xe7, 0x97, 0x08, 0xb5, 0x89, 0x47, 0x58, 0xb1, 0x41, 0xec, + 0xb2, 0x43, 0x8b, 0x52, 0x61, 0xb8, 0x56, 0x71, 0xb7, 0xe6, 0xec, 0x9b, 0x8e, 0xed, 0x51, 0xa7, + 0x56, 0x6c, 0x9c, 0x2e, 0x11, 0xcf, 0x38, 0x5d, 0xac, 0x10, 0x9b, 0x50, 0xc3, 0x23, 0x65, 0xcd, + 0xa5, 0x8e, 0xe7, 0xa0, 0x7c, 0x80, 0xd7, 0x0c, 0xd7, 0xd2, 0x3a, 0xf0, 0x9a, 0xc4, 0xaf, 0x9c, + 0xac, 0x58, 0x5e, 0xd5, 0x2f, 0x69, 0xa6, 0x53, 0x2f, 0x56, 0x9c, 0x8a, 0x53, 0x14, 0x66, 0x25, + 0x7f, 0x57, 0x7c, 0x89, 0x0f, 0xf1, 0x57, 0x40, 0xb7, 0x72, 0x36, 0x72, 0x5f, 0x37, 0xcc, 0xaa, + 0x65, 0x13, 0x7a, 0x50, 0x74, 0xf7, 0x2a, 0x5c, 0xc0, 0x8a, 0x75, 0xe2, 0x19, 0xc5, 0x46, 0xcf, + 0x21, 0x56, 0x8a, 0x83, 0xac, 0xa8, 0x6f, 0x7b, 0x56, 0x9d, 0xf4, 0x18, 0xbc, 0x91, 0x64, 0xc0, + 0xcc, 0x2a, 0xa9, 0x1b, 0xdd, 0x76, 0xea, 0x4f, 0x0a, 0xac, 0x5e, 0xfa, 0x9c, 0xd4, 0x5d, 0x6f, + 0x9b, 0x5a, 0x0e, 0xb5, 0xbc, 0x83, 0x0d, 0xd2, 0x20, 0xb5, 0x75, 0xc7, 0xde, 0xb5, 0x2a, 0x3e, + 0x35, 0x3c, 0xcb, 0xb1, 0xd1, 0x2d, 0xc8, 0xd9, 0x4e, 0xdd, 0xb2, 0x0d, 0x2e, 0x37, 0x7d, 0x4a, + 0x89, 0x6d, 0x1e, 0xec, 0x54, 0x0d, 0x4a, 0x58, 0x4e, 0x59, 0x55, 0x5e, 0xc9, 0xe8, 0xff, 0x6f, + 0x35, 0x0b, 0xb9, 0xad, 0x01, 0x18, 0x3c, 0xd0, 0x1a, 0xbd, 0x03, 0xf3, 0x35, 0x62, 0x97, 0x8d, + 0x52, 0x8d, 0x6c, 0x13, 0x6a, 0x12, 
0xdb, 0xcb, 0xa5, 0x04, 0xe1, 0x62, 0xab, 0x59, 0x98, 0xdf, + 0x88, 0xab, 0x70, 0x37, 0x56, 0xbd, 0x0d, 0xcb, 0x1f, 0xd4, 0x9c, 0xfd, 0x8b, 0x16, 0xf3, 0x2c, + 0xbb, 0xe2, 0x5b, 0xac, 0x4a, 0xe8, 0x26, 0xf1, 0xaa, 0x4e, 0x19, 0xbd, 0x07, 0x69, 0xef, 0xc0, + 0x25, 0xe2, 0x7c, 0x59, 0xfd, 0xf8, 0xc3, 0x66, 0x61, 0xac, 0xd5, 0x2c, 0xa4, 0x6f, 0x1c, 0xb8, + 0xe4, 0x69, 0xb3, 0x70, 0x64, 0x80, 0x19, 0x57, 0x63, 0x61, 0xa8, 0x7e, 0x93, 0x02, 0xe0, 0xa8, + 0x1d, 0x11, 0x38, 0x74, 0x17, 0xa6, 0xf8, 0x63, 0x95, 0x0d, 0xcf, 0x10, 0x9c, 0xd3, 0x67, 0x4e, + 0x69, 0x51, 0xa6, 0x84, 0x31, 0xd7, 0xdc, 0xbd, 0x0a, 0x17, 0x30, 0x8d, 0xa3, 0xb5, 0xc6, 0x69, + 0xed, 0x5a, 0xe9, 0x1e, 0x31, 0xbd, 0x4d, 0xe2, 0x19, 0x3a, 0x92, 0xa7, 0x80, 0x48, 0x86, 0x43, + 0x56, 0xb4, 0x0d, 0x69, 0xe6, 0x12, 0x53, 0x04, 0x60, 0xfa, 0x8c, 0xa6, 0x0d, 0xcf, 0x43, 0x2d, + 0x3a, 0xdb, 0x8e, 0x4b, 0x4c, 0x7d, 0xa6, 0x7d, 0x43, 0xfe, 0x85, 0x05, 0x13, 0xba, 0x05, 0x13, + 0xcc, 0x33, 0x3c, 0x9f, 0xe5, 0xc6, 0x7b, 0x4e, 0x9c, 0xc4, 0x29, 0xec, 0xf4, 0x39, 0xc9, 0x3a, + 0x11, 0x7c, 0x63, 0xc9, 0xa7, 0x3e, 0x4e, 0xc1, 0x62, 0x04, 0x5e, 0x77, 0xec, 0xb2, 0x25, 0x32, + 0xe5, 0xed, 0x58, 0xd4, 0x8f, 0x75, 0x45, 0x7d, 0xb9, 0x8f, 0x49, 0x14, 0x71, 0xf4, 0x66, 0x78, + 0xdc, 0x94, 0x30, 0x3f, 0x1a, 0x77, 0xfe, 0xb4, 0x59, 0x98, 0x0f, 0xcd, 0xe2, 0xe7, 0x41, 0x0d, + 0x40, 0x35, 0x83, 0x79, 0x37, 0xa8, 0x61, 0xb3, 0x80, 0xd6, 0xaa, 0x13, 0x79, 0xeb, 0xd7, 0x46, + 0x7b, 0x27, 0x6e, 0xa1, 0xaf, 0x48, 0x97, 0x68, 0xa3, 0x87, 0x0d, 0xf7, 0xf1, 0x80, 0x5e, 0x86, + 0x09, 0x4a, 0x0c, 0xe6, 0xd8, 0xb9, 0xb4, 0x38, 0x72, 0x18, 0x2f, 0x2c, 0xa4, 0x58, 0x6a, 0xd1, + 0xab, 0x30, 0x59, 0x27, 0x8c, 0x19, 0x15, 0x92, 0xcb, 0x08, 0xe0, 0xbc, 0x04, 0x4e, 0x6e, 0x06, + 0x62, 0xdc, 0xd6, 0xab, 0x3f, 0x2b, 0x30, 0x17, 0xc5, 0x69, 0xc3, 0x62, 0x1e, 0xba, 0xd3, 0x93, + 0x7b, 0xda, 0x68, 0x77, 0xe2, 0xd6, 0x22, 0xf3, 0x16, 0xa4, 0xbb, 0xa9, 0xb6, 0xa4, 0x23, 0xef, + 0xae, 0x41, 0xc6, 0xf2, 0x48, 0x9d, 0x47, 0x7d, 0xbc, 0x2b, 0x5c, 0x09, 0x49, 0xa2, 0xcf, 0x4a, + 0xda, 0xcc, 0x15, 0x4e, 0x80, 0x03, 0x1e, 0xf5, 0xcf, 0xf1, 0xce, 0x1b, 0xf0, 0x7c, 0x44, 0xdf, + 0x2b, 0xb0, 0xe2, 0x0e, 0x6c, 0x30, 0xf2, 0x52, 0xeb, 0x49, 0x9e, 0x07, 0xb7, 0x28, 0x4c, 0x76, + 0x09, 0xef, 0x2b, 0x44, 0x57, 0xe5, 0x91, 0x56, 0x86, 0x80, 0x87, 0x1c, 0x05, 0x7d, 0x04, 0xa8, + 0x6e, 0x78, 0x3c, 0xa2, 0x95, 0x6d, 0x4a, 0x4c, 0x52, 0xe6, 0xac, 0xb2, 0x29, 0x85, 0xd9, 0xb1, + 0xd9, 0x83, 0xc0, 0x7d, 0xac, 0xd0, 0x57, 0x0a, 0x2c, 0x96, 0x7b, 0x9b, 0x8c, 0xcc, 0xcb, 0xf3, + 0xa3, 0x04, 0xba, 0x4f, 0x8f, 0xd2, 0x97, 0x5b, 0xcd, 0xc2, 0x62, 0x1f, 0x05, 0xee, 0xe7, 0x0c, + 0xdd, 0x81, 0x0c, 0xf5, 0x6b, 0x84, 0xe5, 0xd2, 0xe2, 0x79, 0x13, 0xbd, 0x6e, 0x3b, 0x35, 0xcb, + 0x3c, 0xc0, 0xdc, 0xe4, 0x13, 0xcb, 0xab, 0xee, 0xf8, 0xa2, 0x57, 0xb1, 0xe8, 0xad, 0x85, 0x0a, + 0x07, 0xa4, 0xea, 0x03, 0x58, 0xe8, 0x6e, 0x1a, 0xa8, 0x02, 0x60, 0xb6, 0xeb, 0x94, 0x0f, 0x08, + 0xee, 0xf6, 0xf5, 0xd1, 0xb3, 0x2a, 0xac, 0xf1, 0xa8, 0x5f, 0x86, 0x22, 0x86, 0x3b, 0xa8, 0xd5, + 0x53, 0x30, 0x73, 0x99, 0x3a, 0xbe, 0x2b, 0xcf, 0x88, 0x56, 0x21, 0x6d, 0x1b, 0xf5, 0x76, 0xf7, + 0x09, 0x3b, 0xe2, 0x96, 0x51, 0x27, 0x58, 0x68, 0xd4, 0xef, 0x14, 0x98, 0xdd, 0xb0, 0xea, 0x96, + 0x87, 0x09, 0x73, 0x1d, 0x9b, 0x11, 0x74, 0x2e, 0xd6, 0xb1, 0x8e, 0x76, 0x75, 0xac, 0x43, 0x31, + 0x70, 0x47, 0xaf, 0xfa, 0x14, 0x26, 0xef, 0xfb, 0xc4, 0xb7, 0xec, 0x8a, 0xec, 0xd7, 0x67, 0x93, + 0x2e, 0x78, 0x3d, 0x80, 0xc7, 0xb2, 0x4d, 0x9f, 0xe6, 0x2d, 0x40, 0x6a, 0x70, 0x9b, 0x51, 0xfd, + 0x27, 0x05, 0x47, 0x85, 0x63, 0x52, 0x1e, 0x32, 0x95, 0xef, 
0x40, 0xce, 0x60, 0xcc, 0xa7, 0xa4, + 0x3c, 0x68, 0x2a, 0xaf, 0xca, 0xdb, 0xe4, 0xd6, 0x06, 0xe0, 0xf0, 0x40, 0x06, 0x74, 0x0f, 0x66, + 0x6b, 0x9d, 0x77, 0x97, 0xd7, 0x3c, 0x99, 0x74, 0xcd, 0x58, 0xc0, 0xf4, 0x25, 0x79, 0x82, 0x78, + 0xd0, 0x71, 0x9c, 0xba, 0xdf, 0x16, 0x30, 0x3e, 0xfa, 0x16, 0x80, 0xae, 0xc1, 0x52, 0xc9, 0xa1, + 0xd4, 0xd9, 0xb7, 0xec, 0x8a, 0xf0, 0xd3, 0x26, 0x49, 0x0b, 0x92, 0xff, 0xb5, 0x9a, 0x85, 0x25, + 0xbd, 0x1f, 0x00, 0xf7, 0xb7, 0x53, 0xf7, 0x61, 0x69, 0x8b, 0xf7, 0x14, 0xe6, 0xf8, 0xd4, 0x24, + 0x51, 0x41, 0xa0, 0x02, 0x64, 0x1a, 0x84, 0x96, 0x82, 0xa4, 0xce, 0xea, 0x59, 0x5e, 0x0e, 0x1f, + 0x73, 0x01, 0x0e, 0xe4, 0xfc, 0x26, 0x76, 0x64, 0x79, 0x13, 0x6f, 0xb0, 0xdc, 0x84, 0x80, 0x8a, + 0x9b, 0x6c, 0xc5, 0x55, 0xb8, 0x1b, 0xab, 0x36, 0x53, 0xb0, 0x3c, 0xa0, 0xfe, 0xd0, 0x4d, 0x98, + 0x62, 0xf2, 0x6f, 0x59, 0x53, 0xc7, 0x92, 0xde, 0x42, 0xda, 0x46, 0xdd, 0xbf, 0x4d, 0x86, 0x43, + 0x2a, 0xe4, 0xc0, 0x2c, 0x95, 0x47, 0x10, 0x3e, 0xe5, 0x14, 0x38, 0x93, 0xc4, 0xdd, 0x1b, 0x9d, + 0xe8, 0xb1, 0x71, 0x27, 0x21, 0x8e, 0xf3, 0xa3, 0x07, 0xb0, 0xd0, 0x71, 0xed, 0xc0, 0xe7, 0xb8, + 0xf0, 0x79, 0x2e, 0xc9, 0x67, 0xdf, 0x47, 0xd1, 0x73, 0xd2, 0xed, 0xc2, 0x56, 0x17, 0x2d, 0xee, + 0x71, 0xa4, 0xfe, 0x9a, 0x82, 0x21, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x63, 0x4b, 0xde, 0xbb, + 0xcf, 0x3e, 0xf1, 0x06, 0x2e, 0x7d, 0xd5, 0xae, 0xa5, 0xef, 0xfd, 0xe7, 0xf0, 0x31, 0x7c, 0x09, + 0xfc, 0x2b, 0x05, 0x2f, 0x0d, 0x36, 0x8e, 0x96, 0xc2, 0xab, 0xb1, 0x16, 0x7b, 0xbe, 0xab, 0xc5, + 0x1e, 0x1b, 0x81, 0xe2, 0xbf, 0x25, 0xb1, 0x6b, 0x49, 0xfc, 0x4d, 0x81, 0xfc, 0xe0, 0xb8, 0xbd, + 0x80, 0xa5, 0xf1, 0xb3, 0xf8, 0xd2, 0xf8, 0xd6, 0xb3, 0x27, 0xd9, 0x80, 0x25, 0xf2, 0xf2, 0xb0, + 0xdc, 0x0a, 0xd7, 0xbd, 0x11, 0x46, 0xfe, 0x0f, 0xa9, 0x61, 0xa1, 0x12, 0xdb, 0x69, 0xc2, 0xaf, + 0x96, 0x98, 0xf5, 0x25, 0x9b, 0x8f, 0x9e, 0x3a, 0x9f, 0x1e, 0x41, 0x42, 0x56, 0x61, 0xb2, 0x16, + 0xcc, 0x6a, 0x59, 0xd4, 0x6b, 0x23, 0x8d, 0xc8, 0x61, 0xa3, 0x3d, 0x58, 0x0b, 0x24, 0x0c, 0xb7, + 0xe9, 0x51, 0x19, 0x26, 0x88, 0xf8, 0xa9, 0x3e, 0x6a, 0x65, 0x27, 0xfd, 0xb0, 0xd7, 0x81, 0x67, + 0x61, 0x80, 0xc2, 0x92, 0x5b, 0xfd, 0x56, 0x81, 0xd5, 0xa4, 0x96, 0x80, 0xf6, 0xfb, 0xac, 0x78, + 0xcf, 0xb1, 0xbe, 0x8f, 0xbe, 0xf2, 0xfd, 0xa8, 0xc0, 0xe1, 0x7e, 0x9b, 0x14, 0x2f, 0x32, 0xbe, + 0x3e, 0x85, 0xbb, 0x4f, 0x58, 0x64, 0xd7, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x01, 0x53, 0x55, 0xc3, + 0x2e, 0xef, 0x58, 0x5f, 0xb4, 0xb7, 0xfa, 0x30, 0xcd, 0x3f, 0x94, 0x72, 0x1c, 0x22, 0xd0, 0x45, + 0x58, 0x10, 0x76, 0x1b, 0xc4, 0xae, 0x78, 0x55, 0xf1, 0x22, 0x72, 0x35, 0x09, 0xa7, 0xce, 0xf5, + 0x2e, 0x3d, 0xee, 0xb1, 0x50, 0xff, 0x56, 0x00, 0x3d, 0xcb, 0x36, 0x71, 0x1c, 0xb2, 0x86, 0x6b, + 0x89, 0x15, 0x37, 0x28, 0xb4, 0xac, 0x3e, 0xdb, 0x6a, 0x16, 0xb2, 0x6b, 0xdb, 0x57, 0x02, 0x21, + 0x8e, 0xf4, 0x1c, 0xdc, 0x1e, 0xb4, 0xc1, 0x40, 0x95, 0xe0, 0xb6, 0x63, 0x86, 0x23, 0x3d, 0xba, + 0x00, 0x33, 0x66, 0xcd, 0x67, 0x1e, 0xa1, 0x3b, 0xa6, 0xe3, 0x12, 0xd1, 0x98, 0xa6, 0xf4, 0xc3, + 0xf2, 0x4e, 0x33, 0xeb, 0x1d, 0x3a, 0x1c, 0x43, 0x22, 0x0d, 0x80, 0x97, 0x15, 0x73, 0x0d, 0xee, + 0x27, 0x23, 0xfc, 0xcc, 0xf1, 0x07, 0xdb, 0x0a, 0xa5, 0xb8, 0x03, 0xa1, 0xde, 0x83, 0xa5, 0x1d, + 0x42, 0x1b, 0x96, 0x49, 0xd6, 0x4c, 0xd3, 0xf1, 0x6d, 0xaf, 0xbd, 0xac, 0x17, 0x21, 0x1b, 0xc2, + 0x64, 0xe5, 0x1d, 0x92, 0xfe, 0xb3, 0x21, 0x17, 0x8e, 0x30, 0x61, 0xa9, 0xa7, 0x06, 0x96, 0xfa, + 0x2f, 0x29, 0x98, 0x8c, 0xe8, 0xd3, 0x7b, 0x96, 0x5d, 0x96, 0xcc, 0x47, 0xda, 0xe8, 0xab, 0x96, + 0x5d, 0x7e, 0xda, 0x2c, 0x4c, 0x4b, 0x18, 0xff, 0xc4, 0x02, 0x88, 0xae, 0x40, 0xda, 
0x67, 0x84, + 0xca, 0x22, 0x3e, 0x9e, 0x94, 0xcc, 0x37, 0x19, 0xa1, 0xed, 0xfd, 0x6a, 0x8a, 0x33, 0x73, 0x01, + 0x16, 0x14, 0x68, 0x13, 0x32, 0x15, 0xfe, 0x28, 0xb2, 0x4e, 0x4f, 0x24, 0x71, 0x75, 0xfe, 0x88, + 0x09, 0xd2, 0x40, 0x48, 0x70, 0xc0, 0x82, 0xee, 0xc3, 0x1c, 0x8b, 0x85, 0x50, 0x3c, 0xd7, 0x08, + 0xfb, 0x52, 0xdf, 0xc0, 0xeb, 0xa8, 0xd5, 0x2c, 0xcc, 0xc5, 0x55, 0xb8, 0xcb, 0x81, 0x5a, 0x84, + 0xe9, 0x8e, 0x0b, 0x26, 0x77, 0x59, 0xfd, 0xe2, 0xc3, 0x27, 0xf9, 0xb1, 0x47, 0x4f, 0xf2, 0x63, + 0x8f, 0x9f, 0xe4, 0xc7, 0xbe, 0x6c, 0xe5, 0x95, 0x87, 0xad, 0xbc, 0xf2, 0xa8, 0x95, 0x57, 0x1e, + 0xb7, 0xf2, 0xca, 0xef, 0xad, 0xbc, 0xf2, 0xf5, 0x1f, 0xf9, 0xb1, 0xdb, 0xf9, 0xe1, 0xff, 0x8b, + 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3a, 0xda, 0x82, 0x48, 0xc5, 0x15, 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1154,6 +1223,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1477,6 +1556,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1769,6 +1860,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1903,6 +2009,12 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2028,6 +2140,10 @@ func (m 
*PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2145,6 +2261,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2258,6 +2385,8 @@ func (this *LimitedPriorityLevelConfiguration) String() string { s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2359,6 +2488,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2446,6 +2576,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3542,6 +3762,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4485,6 +4745,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 474d520df..96df0ace7 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/flowcontrol/v1beta1"; +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. 
+ // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} + // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { // `type` is the type of flow distinguisher method @@ -153,8 +187,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -176,6 +210,35 @@ message LimitedPriorityLevelConfiguration { // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
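The three formulas in the field documentation above can be evaluated on their own. The following standalone Go sketch plugs in made-up numbers (the ServerCL, NCS, and percent values are illustrative assumptions, not anything shipped in this patch) just to show how NominalCL, LendableCL, and BorrowingCL relate:

// Illustrative evaluation of the flow-control borrowing formulas documented above.
// All inputs are hypothetical; only the formulas themselves come from the field docs.
package main

import (
	"fmt"
	"math"
)

func main() {
	serverCL := 600.0              // hypothetical server concurrency limit (ServerCL)
	ncs := 30.0                    // this level's nominalConcurrencyShares, NCS(i)
	sumNCS := 100.0                // sum of NCS over all priority levels
	lendablePercent := 50.0        // defaults to 0; 50 chosen only for illustration
	borrowingLimitPercent := 200.0 // may exceed 100, per the field docs

	nominalCL := math.Ceil(serverCL * ncs / sumNCS)                      // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
	lendableCL := math.Round(nominalCL * lendablePercent / 100.0)        // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
	borrowingCL := math.Round(nominalCL * borrowingLimitPercent / 100.0) // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )

	fmt.Println(nominalCL, lendableCL, borrowingCL) // prints: 180 90 360
}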
+ // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -303,6 +366,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index b45732642..9e05ff1a0 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -77,7 +77,9 @@ const ( // is a boolean false or has an invalid boolean representation // (if the cluster operator sets it to 'false' it will be stomped) // - any changes to the spec made by the cluster operator will be - // stomped. + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". // // The kube-apiserver will apply updates on the suggested configuration if: // - the cluster operator has enabled auto-update by setting the annotation @@ -105,7 +107,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -127,7 +129,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -382,7 +384,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -403,7 +405,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -435,6 +437,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -451,8 +461,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -474,6 +484,72 @@ type LimitedPriorityLevelConfiguration struct { // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. 
+ // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index b3752b6fb..1405f3c3c 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. 
In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -111,9 +121,11 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. 
The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go index b7b84634a..965d5e55a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,32 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
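Taken together, the new `exempt` spec field and the ExemptPriorityLevelConfiguration type added in this patch would be populated roughly as in the sketch below. This is not part of the patch: the numeric values are arbitrary, and the PriorityLevelEnablementExempt constant name is assumed from the vendored k8s.io/api/flowcontrol/v1beta1 package rather than shown in this hunk.

// Minimal sketch (assumption-laden): constructing a spec that uses the new `exempt` field.
// Values are arbitrary; PriorityLevelEnablementExempt is assumed to be the "Exempt"
// enablement constant defined elsewhere in the vendored v1beta1 package.
package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)

func main() {
	ncs := int32(5)       // exempt level's nominalConcurrencyShares (illustrative)
	lendable := int32(50) // exempt level's lendablePercent (illustrative)

	spec := flowcontrolv1beta1.PriorityLevelConfigurationSpec{
		Type: flowcontrolv1beta1.PriorityLevelEnablementExempt,
		// `exempt` may be set only when `type` is "Exempt"; leaving it nil there
		// means the ExemptPriorityLevelConfiguration defaults apply.
		Exempt: &flowcontrolv1beta1.ExemptPriorityLevelConfiguration{
			NominalConcurrencyShares: &ncs,
			LendablePercent:          &lendable,
		},
	}
	fmt.Printf("%+v\n", spec)
}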
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go index ed1e16c26..59b36b3bf 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -40,7 +40,7 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -64,7 +64,7 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -88,7 +88,7 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -112,7 +112,7 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go index af42d24fb..7f8ee0850 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{0} + return fileDescriptor_ed300aa8e672704e, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{1} + return 
fileDescriptor_ed300aa8e672704e, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{2} + return fileDescriptor_ed300aa8e672704e, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{3} + return fileDescriptor_ed300aa8e672704e, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{4} + return fileDescriptor_ed300aa8e672704e, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{5} + return fileDescriptor_ed300aa8e672704e, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{6} + return fileDescriptor_ed300aa8e672704e, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{7} + return fileDescriptor_ed300aa8e672704e, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{8} + return fileDescriptor_ed300aa8e672704e, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{9} + return fileDescriptor_ed300aa8e672704e, []int{10} } func (m 
*NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{10} + return fileDescriptor_ed300aa8e672704e, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{11} + return fileDescriptor_ed300aa8e672704e, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{12} + return fileDescriptor_ed300aa8e672704e, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{13} + return fileDescriptor_ed300aa8e672704e, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{14} + return fileDescriptor_ed300aa8e672704e, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{15} + return fileDescriptor_ed300aa8e672704e, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{16} + return 
fileDescriptor_ed300aa8e672704e, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{17} + return fileDescriptor_ed300aa8e672704e, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{18} + return fileDescriptor_ed300aa8e672704e, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{19} + return fileDescriptor_ed300aa8e672704e, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{20} + return fileDescriptor_ed300aa8e672704e, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{21} + return fileDescriptor_ed300aa8e672704e, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration") proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowDistinguisherMethod") proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchema") proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaCondition") @@ -689,101 +718,142 @@ func init() { } var fileDescriptor_ed300aa8e672704e = []byte{ - // 1497 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x73, 0xdb, 0x44, - 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, - 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, - 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, - 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, - 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 
0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, - 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x25, 0x70, 0x75, 0xff, 0x32, 0xd5, 0x2c, - 0xa7, 0xbc, 0xef, 0x57, 0x88, 0x67, 0x13, 0x46, 0x68, 0xb9, 0x45, 0xec, 0xaa, 0xe3, 0x95, 0xa5, - 0xc2, 0x70, 0xad, 0xf2, 0x5e, 0xc3, 0x39, 0x30, 0x1d, 0x9b, 0x79, 0x4e, 0xa3, 0xdc, 0x3a, 0x57, - 0x21, 0xcc, 0x38, 0x5f, 0xae, 0x11, 0x9b, 0x78, 0x06, 0x23, 0x55, 0xcd, 0xf5, 0x1c, 0xe6, 0xa0, - 0x62, 0x60, 0xaf, 0x19, 0xae, 0xa5, 0x75, 0xd9, 0x6b, 0xd2, 0x7e, 0xf5, 0x4c, 0xcd, 0x62, 0x75, - 0xbf, 0xa2, 0x99, 0x4e, 0xb3, 0x5c, 0x73, 0x6a, 0x4e, 0x59, 0xb8, 0x55, 0xfc, 0x3d, 0xf1, 0x25, - 0x3e, 0xc4, 0xaf, 0x00, 0x6e, 0xf5, 0x42, 0x1c, 0xbe, 0x69, 0x98, 0x75, 0xcb, 0x26, 0xde, 0x61, - 0xd9, 0xdd, 0xaf, 0x71, 0x01, 0x2d, 0x37, 0x09, 0x33, 0xca, 0xad, 0x73, 0xbd, 0x49, 0xac, 0x96, - 0x87, 0x79, 0x79, 0xbe, 0xcd, 0xac, 0x26, 0xe9, 0x73, 0x78, 0x2b, 0xcd, 0x81, 0x9a, 0x75, 0xd2, - 0x34, 0x7a, 0xfd, 0xd4, 0x3b, 0xb0, 0xf2, 0x71, 0xc3, 0x39, 0xb8, 0x62, 0x51, 0x66, 0xd9, 0x35, - 0xdf, 0xa2, 0x75, 0xe2, 0x6d, 0x11, 0x56, 0x77, 0xaa, 0xe8, 0x03, 0xc8, 0xb2, 0x43, 0x97, 0x14, - 0x94, 0x35, 0xe5, 0xb5, 0xbc, 0x7e, 0xea, 0x51, 0xbb, 0x34, 0xd1, 0x69, 0x97, 0xb2, 0xb7, 0x0e, - 0x5d, 0xf2, 0xac, 0x5d, 0x3a, 0x3e, 0xc4, 0x8d, 0xab, 0xb1, 0x70, 0x54, 0xbf, 0xcf, 0x00, 0x70, - 0xab, 0x5d, 0x11, 0x1a, 0xdd, 0x83, 0x19, 0x5e, 0x6e, 0xd5, 0x60, 0x86, 0xc0, 0x9c, 0x3d, 0x7f, - 0x56, 0x8b, 0x7b, 0x1d, 0x65, 0xad, 0xb9, 0xfb, 0x35, 0x2e, 0xa0, 0x1a, 0xb7, 0xd6, 0x5a, 0xe7, - 0xb4, 0x1b, 0x95, 0xfb, 0xc4, 0x64, 0x5b, 0x84, 0x19, 0x3a, 0x92, 0x59, 0x40, 0x2c, 0xc3, 0x11, - 0x2a, 0xda, 0x81, 0x2c, 0x75, 0x89, 0x59, 0xc8, 0x08, 0x74, 0x4d, 0x1b, 0x7d, 0x92, 0x5a, 0x9c, - 0xdb, 0xae, 0x4b, 0x4c, 0x7d, 0x2e, 0xac, 0x90, 0x7f, 0x61, 0x81, 0x84, 0xbe, 0x80, 0x29, 0xca, - 0x0c, 0xe6, 0xd3, 0xc2, 0x64, 0x5f, 0xc6, 0x69, 0x98, 0xc2, 0x4f, 0x5f, 0x90, 0xa8, 0x53, 0xc1, - 0x37, 0x96, 0x78, 0xea, 0x93, 0x0c, 0x2c, 0xc7, 0xc6, 0x1b, 0x8e, 0x5d, 0xb5, 0x98, 0xe5, 0xd8, - 0xe8, 0xdd, 0x44, 0xd7, 0x4f, 0xf6, 0x74, 0x7d, 0x65, 0x80, 0x4b, 0xdc, 0x71, 0xf4, 0x76, 0x94, - 0x6e, 0x46, 0xb8, 0x9f, 0x48, 0x06, 0x7f, 0xd6, 0x2e, 0x2d, 0x46, 0x6e, 0xc9, 0x7c, 0x50, 0x0b, - 0x50, 0xc3, 0xa0, 0xec, 0x96, 0x67, 0xd8, 0x34, 0x80, 0xb5, 0x9a, 0x44, 0x56, 0xfd, 0xc6, 0x78, - 0xe7, 0xc4, 0x3d, 0xf4, 0x55, 0x19, 0x12, 0x6d, 0xf6, 0xa1, 0xe1, 0x01, 0x11, 0xd0, 0xab, 0x30, - 0xe5, 0x11, 0x83, 0x3a, 0x76, 0x21, 0x2b, 0x52, 0x8e, 0xfa, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x7a, - 0x1d, 0xa6, 0x9b, 0x84, 0x52, 0xa3, 0x46, 0x0a, 0x39, 0x61, 0xb8, 0x28, 0x0d, 0xa7, 0xb7, 0x02, - 0x31, 0x0e, 0xf5, 0xea, 0x2f, 0x0a, 0x2c, 0xc4, 0x7d, 0xda, 0xb4, 0x28, 0x43, 0x77, 0xfb, 0xb8, - 0xa7, 0x8d, 0x57, 0x13, 0xf7, 0x16, 0xcc, 0x5b, 0x92, 0xe1, 0x66, 0x42, 0x49, 0x17, 0xef, 0x6e, - 0x40, 0xce, 0x62, 0xa4, 0xc9, 0xbb, 0x3e, 0xd9, 0xd3, 0xae, 0x14, 0x92, 0xe8, 0xf3, 0x12, 0x36, - 0x77, 0x8d, 0x03, 0xe0, 0x00, 0x47, 0xfd, 0x6b, 0xb2, 0xbb, 0x02, 0xce, 0x47, 0xf4, 0x93, 0x02, - 0xab, 0xae, 0x67, 0x39, 0x9e, 0xc5, 0x0e, 0x37, 0x49, 0x8b, 0x34, 0x36, 0x1c, 0x7b, 0xcf, 0xaa, - 0xf9, 0x9e, 0xc1, 0x5b, 0x29, 0x8b, 0xda, 0x48, 0x8b, 0xbc, 0x33, 0x14, 0x01, 0x93, 0x3d, 0xe2, - 0x11, 0xdb, 0x24, 0xba, 0x2a, 0x53, 0x5a, 0x1d, 0x61, 0x3c, 0x22, 0x15, 0xf4, 0x29, 0xa0, 0xa6, - 0xc1, 0x78, 0x47, 0x6b, 0x3b, 0x1e, 0x31, 0x49, 0x95, 0xa3, 0x0a, 0x42, 0xe6, 0x62, 0x76, 0x6c, - 0xf5, 0x59, 0xe0, 0x01, 0x5e, 0xe8, 0x5b, 0x05, 0x96, 0xab, 0xfd, 0x43, 0x46, 0xf2, 0xf2, 0xd2, - 0x38, 0x8d, 0x1e, 0x30, 0xa3, 0xf4, 0x95, 0x4e, 0xbb, 0xb4, 0x3c, 0x40, 0x81, 0x07, 
0x05, 0x43, - 0x77, 0x21, 0xe7, 0xf9, 0x0d, 0x42, 0x0b, 0x59, 0x71, 0xbc, 0xa9, 0x51, 0x77, 0x9c, 0x86, 0x65, - 0x1e, 0x62, 0xee, 0xf2, 0xb9, 0xc5, 0xea, 0xbb, 0xbe, 0x98, 0x55, 0x34, 0x3e, 0x6b, 0xa1, 0xc2, - 0x01, 0xa8, 0xfa, 0x10, 0x96, 0x7a, 0x87, 0x06, 0xaa, 0x01, 0x98, 0xe1, 0x3d, 0xa5, 0x05, 0x45, - 0x84, 0x7d, 0x73, 0x7c, 0x56, 0x45, 0x77, 0x3c, 0x9e, 0x97, 0x91, 0x88, 0xe2, 0x2e, 0x68, 0xf5, - 0x2c, 0xcc, 0x5d, 0xf5, 0x1c, 0xdf, 0x95, 0x39, 0xa2, 0x35, 0xc8, 0xda, 0x46, 0x33, 0x9c, 0x3e, - 0xd1, 0x44, 0xdc, 0x36, 0x9a, 0x04, 0x0b, 0x8d, 0xfa, 0xa3, 0x02, 0xf3, 0x9b, 0x56, 0xd3, 0x62, - 0x98, 0x50, 0xd7, 0xb1, 0x29, 0x41, 0x17, 0x13, 0x13, 0xeb, 0x44, 0xcf, 0xc4, 0x3a, 0x92, 0x30, - 0xee, 0x9a, 0x55, 0x5f, 0xc2, 0xf4, 0x03, 0x9f, 0xf8, 0x96, 0x5d, 0x93, 0xf3, 0xfa, 0x42, 0x5a, - 0x81, 0x37, 0x03, 0xf3, 0x04, 0xdb, 0xf4, 0x59, 0x3e, 0x02, 0xa4, 0x06, 0x87, 0x88, 0xea, 0xdf, - 0x0a, 0x9c, 0x10, 0x81, 0x49, 0x75, 0x38, 0x8b, 0xd1, 0x5d, 0x28, 0x18, 0x94, 0xfa, 0x1e, 0xa9, - 0x6e, 0x38, 0xb6, 0xe9, 0x7b, 0x9c, 0xff, 0x87, 0xbb, 0x75, 0xc3, 0x23, 0x54, 0x54, 0x93, 0xd3, - 0xd7, 0x64, 0x35, 0x85, 0xf5, 0x21, 0x76, 0x78, 0x28, 0x02, 0xba, 0x0f, 0xf3, 0x8d, 0xee, 0xda, - 0x65, 0x99, 0x67, 0xd2, 0xca, 0x4c, 0x34, 0x4c, 0x3f, 0x26, 0x33, 0x48, 0x36, 0x1d, 0x27, 0xa1, - 0xd5, 0x03, 0x38, 0xb6, 0xcd, 0xef, 0x30, 0x75, 0x7c, 0xcf, 0x24, 0x31, 0x01, 0x51, 0x09, 0x72, - 0x2d, 0xe2, 0x55, 0x02, 0x12, 0xe5, 0xf5, 0x3c, 0xa7, 0xdf, 0x67, 0x5c, 0x80, 0x03, 0x39, 0x7a, - 0x0f, 0x16, 0xed, 0xd8, 0xf3, 0x36, 0xde, 0xa4, 0x85, 0x29, 0x61, 0xba, 0xdc, 0x69, 0x97, 0x16, - 0xb7, 0x93, 0x2a, 0xdc, 0x6b, 0xab, 0xb6, 0x33, 0xb0, 0x32, 0x84, 0xef, 0xe8, 0x36, 0xcc, 0x50, - 0xf9, 0x5b, 0x72, 0xf8, 0x64, 0x5a, 0xed, 0xd2, 0x37, 0x9e, 0xb6, 0x21, 0x18, 0x8e, 0xa0, 0x90, - 0x03, 0xf3, 0x9e, 0x4c, 0x41, 0xc4, 0x94, 0x53, 0xf7, 0x7c, 0x1a, 0x76, 0x7f, 0x77, 0xe2, 0xe6, - 0xe2, 0x6e, 0x40, 0x9c, 0xc4, 0x47, 0x0f, 0x61, 0xa9, 0xab, 0xec, 0x20, 0xe6, 0xa4, 0x88, 0x79, - 0x31, 0x2d, 0xe6, 0xc0, 0x43, 0xd1, 0x0b, 0x32, 0xec, 0xd2, 0x76, 0x0f, 0x2c, 0xee, 0x0b, 0xa4, - 0xfe, 0x96, 0x81, 0x11, 0x83, 0xf8, 0x25, 0x2c, 0x55, 0xf7, 0x12, 0x4b, 0xd5, 0xfb, 0xcf, 0xff, - 0xc2, 0x0c, 0x5d, 0xb2, 0xea, 0x3d, 0x4b, 0xd6, 0x87, 0x2f, 0x10, 0x63, 0xf4, 0xd2, 0xf5, 0x4f, - 0x06, 0x5e, 0x19, 0xee, 0x1c, 0x2f, 0x61, 0xd7, 0x13, 0x23, 0xed, 0x52, 0xcf, 0x48, 0x3b, 0x39, - 0x06, 0xc4, 0xff, 0x4b, 0x59, 0xcf, 0x52, 0xf6, 0xbb, 0x02, 0xc5, 0xe1, 0x7d, 0x7b, 0x09, 0x4b, - 0xda, 0x57, 0xc9, 0x25, 0xed, 0x9d, 0xe7, 0x27, 0xd9, 0x90, 0xa5, 0xed, 0xea, 0x28, 0x6e, 0x45, - 0xeb, 0xd5, 0x18, 0x4f, 0xec, 0xaf, 0x23, 0x5b, 0x25, 0xb6, 0xc1, 0x94, 0xbf, 0x12, 0x12, 0xde, - 0x1f, 0xd9, 0x46, 0xa5, 0x41, 0x9a, 0xc4, 0x66, 0x92, 0x90, 0x75, 0x98, 0x6e, 0x04, 0x6f, 0xa3, - 0xbc, 0xd4, 0xeb, 0x63, 0x3d, 0x49, 0xa3, 0x9e, 0xd2, 0xe0, 0x19, 0x96, 0x66, 0x38, 0x84, 0x57, - 0x7f, 0x50, 0x60, 0x2d, 0xed, 0xb2, 0xa2, 0x83, 0x01, 0xcb, 0xce, 0x0b, 0x2c, 0xb2, 0xe3, 0x2f, - 0x3f, 0x3f, 0x2b, 0x70, 0x74, 0xd0, 0x4e, 0xc1, 0xe9, 0xcf, 0x17, 0x89, 0x68, 0x0b, 0x88, 0xe8, - 0x7f, 0x53, 0x48, 0xb1, 0xd4, 0xa2, 0xd3, 0x30, 0x53, 0x37, 0xec, 0xea, 0xae, 0xf5, 0x75, 0xb8, - 0xdf, 0x46, 0x04, 0xfc, 0x44, 0xca, 0x71, 0x64, 0x81, 0xae, 0xc0, 0x92, 0xf0, 0xdb, 0x24, 0x76, - 0x8d, 0xd5, 0x45, 0xaf, 0xc4, 0x55, 0xce, 0xc5, 0xef, 0xc1, 0xcd, 0x1e, 0x3d, 0xee, 0xf3, 0x50, - 0xff, 0x55, 0x00, 0x3d, 0xcf, 0x3b, 0x7f, 0x0a, 0xf2, 0x86, 0x6b, 0x89, 0x65, 0x2f, 0xb8, 0x02, - 0x79, 0x7d, 0xbe, 0xd3, 0x2e, 0xe5, 0xd7, 0x77, 0xae, 0x05, 0x42, 0x1c, 0xeb, 0xb9, 0x71, 0xf8, - 0x04, 0x06, 
0x4f, 0x9d, 0x34, 0x0e, 0x03, 0x53, 0x1c, 0xeb, 0xd1, 0x65, 0x98, 0x33, 0x1b, 0x3e, - 0x65, 0xc4, 0xdb, 0x35, 0x1d, 0x97, 0x88, 0x91, 0x31, 0xa3, 0x1f, 0x95, 0x35, 0xcd, 0x6d, 0x74, - 0xe9, 0x70, 0xc2, 0x12, 0x69, 0x00, 0x9c, 0xf0, 0xd4, 0x35, 0x78, 0x9c, 0x9c, 0x88, 0xb3, 0xc0, - 0x0f, 0x6c, 0x3b, 0x92, 0xe2, 0x2e, 0x0b, 0xf5, 0x3e, 0x1c, 0xdb, 0x25, 0x5e, 0xcb, 0x32, 0xc9, - 0xba, 0x69, 0x3a, 0xbe, 0xcd, 0xc2, 0xb5, 0xb5, 0x0c, 0xf9, 0xc8, 0x4c, 0xde, 0x89, 0x23, 0x32, - 0x7e, 0x3e, 0xc2, 0xc2, 0xb1, 0x4d, 0x74, 0x09, 0x33, 0xc3, 0x2f, 0x61, 0x06, 0xa6, 0x63, 0xf8, - 0xec, 0xbe, 0x65, 0x57, 0x25, 0xf2, 0xf1, 0xd0, 0xfa, 0xba, 0x65, 0x57, 0x9f, 0xb5, 0x4b, 0xb3, - 0xd2, 0x8c, 0x7f, 0x62, 0x61, 0x88, 0xae, 0x41, 0xd6, 0xa7, 0xc4, 0x93, 0xd7, 0xeb, 0x54, 0x1a, - 0x99, 0x6f, 0x53, 0xe2, 0x85, 0x9b, 0xcf, 0x0c, 0x47, 0xe6, 0x02, 0x2c, 0x20, 0xd0, 0x16, 0xe4, - 0x6a, 0xfc, 0x50, 0xe4, 0xd4, 0x3f, 0x9d, 0x86, 0xd5, 0xbd, 0xce, 0x07, 0x34, 0x10, 0x12, 0x1c, - 0xa0, 0xa0, 0x07, 0xb0, 0x40, 0x13, 0x2d, 0x14, 0xc7, 0x35, 0xc6, 0x26, 0x33, 0xb0, 0xf1, 0x3a, - 0xea, 0xb4, 0x4b, 0x0b, 0x49, 0x15, 0xee, 0x09, 0xa0, 0x96, 0x61, 0xb6, 0xab, 0xc0, 0xf4, 0xf9, - 0xa7, 0x5f, 0x79, 0xf4, 0xb4, 0x38, 0xf1, 0xf8, 0x69, 0x71, 0xe2, 0xc9, 0xd3, 0xe2, 0xc4, 0x37, - 0x9d, 0xa2, 0xf2, 0xa8, 0x53, 0x54, 0x1e, 0x77, 0x8a, 0xca, 0x93, 0x4e, 0x51, 0xf9, 0xa3, 0x53, - 0x54, 0xbe, 0xfb, 0xb3, 0x38, 0x71, 0xa7, 0x38, 0xfa, 0xff, 0x7a, 0xff, 0x05, 0x00, 0x00, 0xff, - 0xff, 0x1e, 0x66, 0x4a, 0x66, 0x11, 0x14, 0x00, 0x00, + // 1617 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, + 0x16, 0xf6, 0xc8, 0x92, 0x6d, 0x1d, 0x3f, 0xd3, 0x8e, 0xcb, 0xba, 0xce, 0x2d, 0xc9, 0x99, 0x5b, + 0x75, 0x73, 0x2f, 0x49, 0x46, 0x89, 0x49, 0x48, 0x80, 0xe2, 0xe1, 0x71, 0x42, 0x08, 0xb1, 0x1d, + 0xa7, 0x9d, 0x40, 0x2a, 0xa4, 0x8a, 0x8c, 0x46, 0x6d, 0x69, 0x62, 0x69, 0x66, 0xd2, 0x3d, 0x23, + 0x63, 0xb2, 0xa1, 0xf8, 0x05, 0xac, 0x61, 0xc9, 0x82, 0x15, 0x1b, 0xb6, 0x2c, 0x58, 0x92, 0x62, + 0x95, 0x65, 0x56, 0x82, 0x88, 0x15, 0xff, 0x00, 0x52, 0x45, 0x15, 0xd5, 0x3d, 0xad, 0x19, 0x8d, + 0x5e, 0xa3, 0x4a, 0xaa, 0xb2, 0x62, 0xe7, 0x39, 0xe7, 0x3b, 0xdf, 0xe9, 0x3e, 0x7d, 0x5e, 0x32, + 0x5c, 0xd9, 0xbf, 0xc8, 0x34, 0xcb, 0x29, 0xee, 0xfb, 0x25, 0x42, 0x6d, 0xe2, 0x11, 0x56, 0x6c, + 0x10, 0xbb, 0xec, 0xd0, 0xa2, 0x54, 0x18, 0xae, 0x55, 0xdc, 0xab, 0x39, 0x07, 0xa6, 0x63, 0x7b, + 0xd4, 0xa9, 0x15, 0x1b, 0x67, 0x4b, 0xc4, 0x33, 0xd6, 0x8a, 0x15, 0x62, 0x13, 0x6a, 0x78, 0xa4, + 0xac, 0xb9, 0xd4, 0xf1, 0x1c, 0x94, 0x0f, 0xf0, 0x9a, 0xe1, 0x5a, 0x5a, 0x07, 0x5e, 0x93, 0xf8, + 0x95, 0xd3, 0x15, 0xcb, 0xab, 0xfa, 0x25, 0xcd, 0x74, 0xea, 0xc5, 0x8a, 0x53, 0x71, 0x8a, 0xc2, + 0xac, 0xe4, 0xef, 0x89, 0x2f, 0xf1, 0x21, 0xfe, 0x0a, 0xe8, 0x56, 0xce, 0x45, 0xee, 0xeb, 0x86, + 0x59, 0xb5, 0x6c, 0x42, 0x0f, 0x8b, 0xee, 0x7e, 0x85, 0x0b, 0x58, 0xb1, 0x4e, 0x3c, 0xa3, 0xd8, + 0x38, 0xdb, 0x7d, 0x88, 0x95, 0xe2, 0x20, 0x2b, 0xea, 0xdb, 0x9e, 0x55, 0x27, 0x3d, 0x06, 0xaf, + 0x25, 0x19, 0x30, 0xb3, 0x4a, 0xea, 0x46, 0xb7, 0x9d, 0xfa, 0x83, 0x02, 0xab, 0x97, 0x3f, 0x25, + 0x75, 0xd7, 0xdb, 0xa1, 0x96, 0x43, 0x2d, 0xef, 0x70, 0x93, 0x34, 0x48, 0x6d, 0xc3, 0xb1, 0xf7, + 0xac, 0x8a, 0x4f, 0x0d, 0xcf, 0x72, 0x6c, 0x74, 0x1b, 0x72, 0xb6, 0x53, 0xb7, 0x6c, 0x83, 0xcb, + 0x4d, 0x9f, 0x52, 0x62, 0x9b, 0x87, 0xbb, 0x55, 0x83, 0x12, 0x96, 0x53, 0x56, 0x95, 0xff, 0x65, + 0xf4, 0x7f, 0xb7, 0x9a, 0x85, 0xdc, 0xf6, 0x00, 0x0c, 0x1e, 0x68, 0x8d, 0xde, 0x82, 0xf9, 0x1a, + 0xb1, 0xcb, 0x46, 0xa9, 0x46, 
0x76, 0x08, 0x35, 0x89, 0xed, 0xe5, 0x52, 0x82, 0x70, 0xb1, 0xd5, + 0x2c, 0xcc, 0x6f, 0xc6, 0x55, 0xb8, 0x1b, 0xab, 0xde, 0x81, 0xe5, 0xf7, 0x6a, 0xce, 0xc1, 0x25, + 0x8b, 0x79, 0x96, 0x5d, 0xf1, 0x2d, 0x56, 0x25, 0x74, 0x8b, 0x78, 0x55, 0xa7, 0x8c, 0xde, 0x81, + 0xb4, 0x77, 0xe8, 0x12, 0x71, 0xbe, 0xac, 0x7e, 0xf2, 0x51, 0xb3, 0x30, 0xd6, 0x6a, 0x16, 0xd2, + 0x37, 0x0f, 0x5d, 0xf2, 0xac, 0x59, 0x38, 0x36, 0xc0, 0x8c, 0xab, 0xb1, 0x30, 0x54, 0xbf, 0x4a, + 0x01, 0x70, 0xd4, 0xae, 0x08, 0x1c, 0xba, 0x07, 0x53, 0xfc, 0xb1, 0xca, 0x86, 0x67, 0x08, 0xce, + 0xe9, 0xb5, 0x33, 0x5a, 0x94, 0x29, 0x61, 0xcc, 0x35, 0x77, 0xbf, 0xc2, 0x05, 0x4c, 0xe3, 0x68, + 0xad, 0x71, 0x56, 0xbb, 0x5e, 0xba, 0x4f, 0x4c, 0x6f, 0x8b, 0x78, 0x86, 0x8e, 0xe4, 0x29, 0x20, + 0x92, 0xe1, 0x90, 0x15, 0xed, 0x40, 0x9a, 0xb9, 0xc4, 0x14, 0x01, 0x98, 0x5e, 0xd3, 0xb4, 0xe1, + 0x79, 0xa8, 0x45, 0x67, 0xdb, 0x75, 0x89, 0xa9, 0xcf, 0xb4, 0x6f, 0xc8, 0xbf, 0xb0, 0x60, 0x42, + 0xb7, 0x61, 0x82, 0x79, 0x86, 0xe7, 0xb3, 0xdc, 0x78, 0xcf, 0x89, 0x93, 0x38, 0x85, 0x9d, 0x3e, + 0x27, 0x59, 0x27, 0x82, 0x6f, 0x2c, 0xf9, 0xd4, 0x27, 0x29, 0x58, 0x8c, 0xc0, 0x1b, 0x8e, 0x5d, + 0xb6, 0x44, 0xa6, 0xbc, 0x19, 0x8b, 0xfa, 0x89, 0xae, 0xa8, 0x2f, 0xf7, 0x31, 0x89, 0x22, 0x8e, + 0x5e, 0x0f, 0x8f, 0x9b, 0x12, 0xe6, 0xc7, 0xe3, 0xce, 0x9f, 0x35, 0x0b, 0xf3, 0xa1, 0x59, 0xfc, + 0x3c, 0xa8, 0x01, 0xa8, 0x66, 0x30, 0xef, 0x26, 0x35, 0x6c, 0x16, 0xd0, 0x5a, 0x75, 0x22, 0x6f, + 0xfd, 0xca, 0x68, 0xef, 0xc4, 0x2d, 0xf4, 0x15, 0xe9, 0x12, 0x6d, 0xf6, 0xb0, 0xe1, 0x3e, 0x1e, + 0xd0, 0x7f, 0x61, 0x82, 0x12, 0x83, 0x39, 0x76, 0x2e, 0x2d, 0x8e, 0x1c, 0xc6, 0x0b, 0x0b, 0x29, + 0x96, 0x5a, 0xf4, 0x7f, 0x98, 0xac, 0x13, 0xc6, 0x8c, 0x0a, 0xc9, 0x65, 0x04, 0x70, 0x5e, 0x02, + 0x27, 0xb7, 0x02, 0x31, 0x6e, 0xeb, 0xd5, 0x1f, 0x15, 0x98, 0x8b, 0xe2, 0xb4, 0x69, 0x31, 0x0f, + 0xdd, 0xed, 0xc9, 0x3d, 0x6d, 0xb4, 0x3b, 0x71, 0x6b, 0x91, 0x79, 0x0b, 0xd2, 0xdd, 0x54, 0x5b, + 0xd2, 0x91, 0x77, 0xd7, 0x21, 0x63, 0x79, 0xa4, 0xce, 0xa3, 0x3e, 0xde, 0x15, 0xae, 0x84, 0x24, + 0xd1, 0x67, 0x25, 0x6d, 0xe6, 0x2a, 0x27, 0xc0, 0x01, 0x8f, 0xfa, 0xfb, 0x78, 0xe7, 0x0d, 0x78, + 0x3e, 0xa2, 0x6f, 0x15, 0x58, 0x71, 0x07, 0x36, 0x18, 0x79, 0xa9, 0x8d, 0x24, 0xcf, 0x83, 0x5b, + 0x14, 0x26, 0x7b, 0x84, 0xf7, 0x15, 0xa2, 0xab, 0xf2, 0x48, 0x2b, 0x43, 0xc0, 0x43, 0x8e, 0x82, + 0x3e, 0x00, 0x54, 0x37, 0x3c, 0x1e, 0xd1, 0xca, 0x0e, 0x25, 0x26, 0x29, 0x73, 0x56, 0xd9, 0x94, + 0xc2, 0xec, 0xd8, 0xea, 0x41, 0xe0, 0x3e, 0x56, 0xe8, 0x0b, 0x05, 0x16, 0xcb, 0xbd, 0x4d, 0x46, + 0xe6, 0xe5, 0x85, 0x51, 0x02, 0xdd, 0xa7, 0x47, 0xe9, 0xcb, 0xad, 0x66, 0x61, 0xb1, 0x8f, 0x02, + 0xf7, 0x73, 0x86, 0xee, 0x42, 0x86, 0xfa, 0x35, 0xc2, 0x72, 0x69, 0xf1, 0xbc, 0x89, 0x5e, 0x77, + 0x9c, 0x9a, 0x65, 0x1e, 0x62, 0x6e, 0xf2, 0x91, 0xe5, 0x55, 0x77, 0x7d, 0xd1, 0xab, 0x58, 0xf4, + 0xd6, 0x42, 0x85, 0x03, 0x52, 0xf5, 0x21, 0x2c, 0x74, 0x37, 0x0d, 0x54, 0x01, 0x30, 0xdb, 0x75, + 0xca, 0x07, 0x04, 0x77, 0xfb, 0xea, 0xe8, 0x59, 0x15, 0xd6, 0x78, 0xd4, 0x2f, 0x43, 0x11, 0xc3, + 0x1d, 0xd4, 0xea, 0x19, 0x98, 0xb9, 0x42, 0x1d, 0xdf, 0x95, 0x67, 0x44, 0xab, 0x90, 0xb6, 0x8d, + 0x7a, 0xbb, 0xfb, 0x84, 0x1d, 0x71, 0xdb, 0xa8, 0x13, 0x2c, 0x34, 0xea, 0x37, 0x0a, 0xcc, 0x6e, + 0x5a, 0x75, 0xcb, 0xc3, 0x84, 0xb9, 0x8e, 0xcd, 0x08, 0x3a, 0x1f, 0xeb, 0x58, 0xc7, 0xbb, 0x3a, + 0xd6, 0x91, 0x18, 0xb8, 0xa3, 0x57, 0x7d, 0x0c, 0x93, 0x0f, 0x7c, 0xe2, 0x5b, 0x76, 0x45, 0xf6, + 0xeb, 0x73, 0x49, 0x17, 0xbc, 0x11, 0xc0, 0x63, 0xd9, 0xa6, 0x4f, 0xf3, 0x16, 0x20, 0x35, 0xb8, + 0xcd, 0xa8, 0xfe, 0x95, 0x82, 0xe3, 0xc2, 0x31, 0x29, 
0x0f, 0x99, 0xca, 0x77, 0x21, 0x67, 0x30, + 0xe6, 0x53, 0x52, 0x1e, 0x34, 0x95, 0x57, 0xe5, 0x6d, 0x72, 0xeb, 0x03, 0x70, 0x78, 0x20, 0x03, + 0xba, 0x0f, 0xb3, 0xb5, 0xce, 0xbb, 0xcb, 0x6b, 0x9e, 0x4e, 0xba, 0x66, 0x2c, 0x60, 0xfa, 0x92, + 0x3c, 0x41, 0x3c, 0xe8, 0x38, 0x4e, 0xdd, 0x6f, 0x0b, 0x18, 0x1f, 0x7d, 0x0b, 0x40, 0xd7, 0x61, + 0xa9, 0xe4, 0x50, 0xea, 0x1c, 0x58, 0x76, 0x45, 0xf8, 0x69, 0x93, 0xa4, 0x05, 0xc9, 0xbf, 0x5a, + 0xcd, 0xc2, 0x92, 0xde, 0x0f, 0x80, 0xfb, 0xdb, 0xa9, 0x07, 0xb0, 0xb4, 0xcd, 0x7b, 0x0a, 0x73, + 0x7c, 0x6a, 0x92, 0xa8, 0x20, 0x50, 0x01, 0x32, 0x0d, 0x42, 0x4b, 0x41, 0x52, 0x67, 0xf5, 0x2c, + 0x2f, 0x87, 0x0f, 0xb9, 0x00, 0x07, 0x72, 0x7e, 0x13, 0x3b, 0xb2, 0xbc, 0x85, 0x37, 0x59, 0x6e, + 0x42, 0x40, 0xc5, 0x4d, 0xb6, 0xe3, 0x2a, 0xdc, 0x8d, 0x55, 0x9b, 0x29, 0x58, 0x1e, 0x50, 0x7f, + 0xe8, 0x16, 0x4c, 0x31, 0xf9, 0xb7, 0xac, 0xa9, 0x13, 0x49, 0x6f, 0x21, 0x6d, 0xa3, 0xee, 0xdf, + 0x26, 0xc3, 0x21, 0x15, 0x72, 0x60, 0x96, 0xca, 0x23, 0x08, 0x9f, 0x72, 0x0a, 0xac, 0x25, 0x71, + 0xf7, 0x46, 0x27, 0x7a, 0x6c, 0xdc, 0x49, 0x88, 0xe3, 0xfc, 0xe8, 0x21, 0x2c, 0x74, 0x5c, 0x3b, + 0xf0, 0x39, 0x2e, 0x7c, 0x9e, 0x4f, 0xf2, 0xd9, 0xf7, 0x51, 0xf4, 0x9c, 0x74, 0xbb, 0xb0, 0xdd, + 0x45, 0x8b, 0x7b, 0x1c, 0xa9, 0x3f, 0xa7, 0x60, 0xc8, 0x60, 0x78, 0x09, 0x4b, 0xde, 0xbd, 0xd8, + 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0x81, 0x4b, 0x5f, 0xb5, 0x6b, 0xe9, 0x7b, 0xf7, 0x05, 0x7c, + 0x0c, 0x5f, 0x02, 0xff, 0x48, 0xc1, 0x7f, 0x06, 0x1b, 0x47, 0x4b, 0xe1, 0xb5, 0x58, 0x8b, 0xbd, + 0xd0, 0xd5, 0x62, 0x4f, 0x8c, 0x40, 0xf1, 0xcf, 0x92, 0xd8, 0xb5, 0x24, 0xfe, 0xa2, 0x40, 0x7e, + 0x70, 0xdc, 0x5e, 0xc2, 0xd2, 0xf8, 0x49, 0x7c, 0x69, 0x7c, 0xe3, 0xf9, 0x93, 0x6c, 0xc0, 0x12, + 0x79, 0x65, 0x58, 0x6e, 0x85, 0xeb, 0xde, 0x08, 0x23, 0xff, 0xbb, 0xd4, 0xb0, 0x50, 0x89, 0xed, + 0x34, 0xe1, 0x57, 0x4b, 0xcc, 0xfa, 0xb2, 0xcd, 0x47, 0x4f, 0x9d, 0x4f, 0x8f, 0x20, 0x21, 0xab, + 0x30, 0x59, 0x0b, 0x66, 0xb5, 0x2c, 0xea, 0xf5, 0x91, 0x46, 0xe4, 0xb0, 0xd1, 0x1e, 0xac, 0x05, + 0x12, 0x86, 0xdb, 0xf4, 0xa8, 0x0c, 0x13, 0x44, 0xfc, 0x54, 0x1f, 0xb5, 0xb2, 0x93, 0x7e, 0xd8, + 0xeb, 0xc0, 0xb3, 0x30, 0x40, 0x61, 0xc9, 0xad, 0x7e, 0xad, 0xc0, 0x6a, 0x52, 0x4b, 0x40, 0x07, + 0x7d, 0x56, 0xbc, 0x17, 0x58, 0xdf, 0x47, 0x5f, 0xf9, 0xbe, 0x57, 0xe0, 0x68, 0xbf, 0x4d, 0x8a, + 0x17, 0x19, 0x5f, 0x9f, 0xc2, 0xdd, 0x27, 0x2c, 0xb2, 0x1b, 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x82, + 0xa9, 0xaa, 0x61, 0x97, 0x77, 0xad, 0xcf, 0xda, 0x5b, 0x7d, 0x98, 0xe6, 0xef, 0x4b, 0x39, 0x0e, + 0x11, 0xe8, 0x12, 0x2c, 0x08, 0xbb, 0x4d, 0x62, 0x57, 0xbc, 0xaa, 0x78, 0x11, 0xb9, 0x9a, 0x84, + 0x53, 0xe7, 0x46, 0x97, 0x1e, 0xf7, 0x58, 0xa8, 0x7f, 0x2a, 0x80, 0x9e, 0x67, 0x9b, 0x38, 0x09, + 0x59, 0xc3, 0xb5, 0xc4, 0x8a, 0x1b, 0x14, 0x5a, 0x56, 0x9f, 0x6d, 0x35, 0x0b, 0xd9, 0xf5, 0x9d, + 0xab, 0x81, 0x10, 0x47, 0x7a, 0x0e, 0x6e, 0x0f, 0xda, 0x60, 0xa0, 0x4a, 0x70, 0xdb, 0x31, 0xc3, + 0x91, 0x1e, 0x5d, 0x84, 0x19, 0xb3, 0xe6, 0x33, 0x8f, 0xd0, 0x5d, 0xd3, 0x71, 0x89, 0x68, 0x4c, + 0x53, 0xfa, 0x51, 0x79, 0xa7, 0x99, 0x8d, 0x0e, 0x1d, 0x8e, 0x21, 0x91, 0x06, 0xc0, 0xcb, 0x8a, + 0xb9, 0x06, 0xf7, 0x93, 0x11, 0x7e, 0xe6, 0xf8, 0x83, 0x6d, 0x87, 0x52, 0xdc, 0x81, 0x50, 0xef, + 0xc3, 0xd2, 0x2e, 0xa1, 0x0d, 0xcb, 0x24, 0xeb, 0xa6, 0xe9, 0xf8, 0xb6, 0xd7, 0x5e, 0xd6, 0x8b, + 0x90, 0x0d, 0x61, 0xb2, 0xf2, 0x8e, 0x48, 0xff, 0xd9, 0x90, 0x0b, 0x47, 0x98, 0xb0, 0xd4, 0x53, + 0x03, 0x4b, 0xfd, 0xa7, 0x14, 0x4c, 0x46, 0xf4, 0xe9, 0x7d, 0xcb, 0x2e, 0x4b, 0xe6, 0x63, 0x6d, + 0xf4, 0x35, 0xcb, 0x2e, 0x3f, 0x6b, 0x16, 0xa6, 0x25, 0x8c, 0x7f, 0x62, 0x01, 
0x44, 0x57, 0x21, + 0xed, 0x33, 0x42, 0x65, 0x11, 0x9f, 0x4c, 0x4a, 0xe6, 0x5b, 0x8c, 0xd0, 0xf6, 0x7e, 0x35, 0xc5, + 0x99, 0xb9, 0x00, 0x0b, 0x0a, 0xb4, 0x05, 0x99, 0x0a, 0x7f, 0x14, 0x59, 0xa7, 0xa7, 0x92, 0xb8, + 0x3a, 0x7f, 0xc4, 0x04, 0x69, 0x20, 0x24, 0x38, 0x60, 0x41, 0x0f, 0x60, 0x8e, 0xc5, 0x42, 0x28, + 0x9e, 0x6b, 0x84, 0x7d, 0xa9, 0x6f, 0xe0, 0x75, 0xd4, 0x6a, 0x16, 0xe6, 0xe2, 0x2a, 0xdc, 0xe5, + 0x40, 0x2d, 0xc2, 0x74, 0xc7, 0x05, 0x93, 0xbb, 0xac, 0x7e, 0xe9, 0xd1, 0xd3, 0xfc, 0xd8, 0xe3, + 0xa7, 0xf9, 0xb1, 0x27, 0x4f, 0xf3, 0x63, 0x9f, 0xb7, 0xf2, 0xca, 0xa3, 0x56, 0x5e, 0x79, 0xdc, + 0xca, 0x2b, 0x4f, 0x5a, 0x79, 0xe5, 0xd7, 0x56, 0x5e, 0xf9, 0xf2, 0xb7, 0xfc, 0xd8, 0x9d, 0xfc, + 0xf0, 0xff, 0xc5, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x4d, 0x1e, 0x25, 0xc5, 0x15, 0x00, + 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1154,6 +1224,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1477,6 +1557,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1769,6 +1861,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1903,6 +2010,12 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2028,6 +2141,10 @@ func 
(m *PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2145,6 +2262,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2258,6 +2386,8 @@ func (this *LimitedPriorityLevelConfiguration) String() string { s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2359,6 +2489,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2446,6 +2577,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3542,6 +3763,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4485,6 +4746,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index 80bbaedea..a8c8a3273 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/flowcontrol/v1beta2"; +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. 
+ // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} + // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { // `type` is the type of flow distinguisher method @@ -153,8 +187,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -176,6 +210,35 @@ message LimitedPriorityLevelConfiguration { // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
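To make the three formulas above (NominalCL, LendableCL, BorrowingCL) concrete, here is a small self-contained Go sketch. It is an editor illustration, not part of the vendored API: the level names, the ServerCL of 600, and the share values are arbitrary assumptions, and a nil borrowingLimitPercent is treated as an effectively infinite borrowing limit, as the comment above describes.

package main

import (
	"fmt"
	"math"
)

// level carries only the three knobs the formulas above need.
type level struct {
	name                  string
	nominalShares         int32  // nominalConcurrencyShares (NCS)
	lendablePercent       int32  // 0..100
	borrowingLimitPercent *int32 // nil means "no limit"
}

func main() {
	serverCL := 600.0 // assumed total server concurrency limit (ServerCL)
	thirty := int32(30)
	levels := []level{
		{name: "workload-low", nominalShares: 100, lendablePercent: 90},
		{name: "workload-high", nominalShares: 40, lendablePercent: 50, borrowingLimitPercent: &thirty},
		{name: "system", nominalShares: 60, lendablePercent: 33, borrowingLimitPercent: &thirty},
	}

	var sumNCS int32
	for _, l := range levels {
		sumNCS += l.nominalShares
	}

	for _, l := range levels {
		// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
		nominalCL := math.Ceil(serverCL * float64(l.nominalShares) / float64(sumNCS))
		// LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
		lendableCL := math.Round(nominalCL * float64(l.lendablePercent) / 100.0)
		// BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ), infinite when nil
		borrowingCL := math.Inf(1)
		if l.borrowingLimitPercent != nil {
			borrowingCL = math.Round(nominalCL * float64(*l.borrowingLimitPercent) / 100.0)
		}
		fmt.Printf("%s: NominalCL=%v LendableCL=%v BorrowingCL=%v\n", l.name, nominalCL, lendableCL, borrowingCL)
	}
}

With these sample numbers, sum_ncs is 200, so "workload-low" gets NominalCL = ceil(600*100/200) = 300, of which round(300*90/100) = 270 seats are lendable to the other levels.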
+ // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -303,6 +366,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go index 408681e99..e8cf7abff 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -77,7 +77,9 @@ const ( // is a boolean false or has an invalid boolean representation // (if the cluster operator sets it to 'false' it will be stomped) // - any changes to the spec made by the cluster operator will be - // stomped. + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". // // The kube-apiserver will apply updates on the suggested configuration if: // - the cluster operator has enabled auto-update by setting the annotation @@ -105,6 +107,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +129,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -380,6 +384,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -400,6 +405,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -431,6 +437,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. 
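The `exempt`/`limited` pairing introduced above behaves as a one-of governed by `type`: `exempt` may only be non-empty when `type` is `"Exempt"`, and `limited` only when `type` is `"Limited"`. A minimal sketch of building both kinds of spec against the vendored v1beta2 types follows; the field values are arbitrary, and the MUST/MAY rules are enforced by server-side validation, not by this snippet.

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	// An exempt level: only the `exempt` stanza may be set; `limited` stays nil.
	exemptSpec := flowcontrol.PriorityLevelConfigurationSpec{
		Type: flowcontrol.PriorityLevelEnablementExempt,
		Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{
			NominalConcurrencyShares: int32Ptr(0),
			LendablePercent:          int32Ptr(0),
		},
	}

	// A limited level: only the `limited` stanza may be set; `exempt` stays nil.
	limitedSpec := flowcontrol.PriorityLevelConfigurationSpec{
		Type: flowcontrol.PriorityLevelEnablementLimited,
		Limited: &flowcontrol.LimitedPriorityLevelConfiguration{
			AssuredConcurrencyShares: 30,
			LendablePercent:          int32Ptr(50),
			BorrowingLimitPercent:    int32Ptr(100),
			LimitResponse: flowcontrol.LimitResponse{
				Type: flowcontrol.LimitResponseTypeReject,
			},
		},
	}

	fmt.Println(exemptSpec.Type, limitedSpec.Type)
}

For the mandatory "exempt" PriorityLevelConfiguration, `nominalConcurrencyShares` and `lendablePercent` are the only spec fields an operator can change without being stomped by auto-update, per the note in types.go further down.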
// +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -447,8 +461,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this @@ -470,6 +484,72 @@ type LimitedPriorityLevelConfiguration struct { // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. 
+ // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 4775a8e99..49a417809 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. 
This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -111,9 +121,11 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. 
A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go index e6288a687..e0605b95d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go @@ -25,6 +25,32 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go index 00cefde41..2abad4e19 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v1beta2 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go new file mode 100644 index 000000000..cd60cfef7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta3 holds api types of version v1beta3 for group "flowcontrol.apiserver.k8s.io". +package v1beta3 // import "k8s.io/api/flowcontrol/v1beta3" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go new file mode 100644 index 000000000..c6598306d --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go @@ -0,0 +1,5663 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto + +package v1beta3 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{1} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{2} +} +func (m *FlowSchema) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{3} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{4} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{5} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{6} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{7} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{8} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{9} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{10} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{11} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{12} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{13} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition 
proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{14} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{15} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{16} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{17} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{18} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{19} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{20} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + 
return fileDescriptor_803504887082f044, []int{21} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_803504887082f044, []int{22} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta3.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), 
"k8s.io.api.flowcontrol.v1beta3.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta3.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto", fileDescriptor_803504887082f044) +} + +var fileDescriptor_803504887082f044 = []byte{ + // 1604 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x73, 0xdb, 0x54, + 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x9e, 0xbd, 0x69, 0x26, 0xfe, 0xd2, 0x6f, 0xec, 0x54, 0xdf, + 0xcc, 0x57, 0xa0, 0xad, 0xdc, 0x27, 0x2d, 0x30, 0x3c, 0xaa, 0xb4, 0x94, 0xd2, 0x24, 0x4d, 0x6f, + 0x5a, 0xe8, 0x94, 0xce, 0x50, 0x59, 0xbe, 0xb1, 0xd5, 0x58, 0x8f, 0xea, 0x4a, 0x0e, 0xa1, 0x1b, + 0x86, 0xbf, 0x80, 0x35, 0x2c, 0x59, 0xb0, 0x62, 0xc3, 0x96, 0x05, 0x4b, 0x3a, 0xac, 0xba, 0xec, + 0xca, 0x50, 0xb3, 0xe2, 0x3f, 0x80, 0xce, 0x30, 0xc3, 0xdc, 0xab, 0x2b, 0xc9, 0xf2, 0x4b, 0x9e, + 0x74, 0xa6, 0x2b, 0x76, 0xd1, 0x79, 0xfc, 0xce, 0xbd, 0xe7, 0x9e, 0xc7, 0xcf, 0x81, 0xab, 0xbb, + 0x17, 0xa9, 0x62, 0xd8, 0xe5, 0x5d, 0xbf, 0x42, 0x5c, 0x8b, 0x78, 0x84, 0x96, 0x9b, 0xc4, 0xaa, + 0xda, 0x6e, 0x59, 0x28, 0x34, 0xc7, 0x28, 0xef, 0x34, 0xec, 0x3d, 0xdd, 0xb6, 0x3c, 0xd7, 0x6e, + 0x94, 0x9b, 0xa7, 0x2b, 0xc4, 0xd3, 0xce, 0x96, 0x6b, 0xc4, 0x22, 0xae, 0xe6, 0x91, 0xaa, 0xe2, + 0xb8, 0xb6, 0x67, 0xa3, 0x62, 0x60, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xd8, 0x2b, 0xc2, 0x7e, 0xe5, + 0x64, 0xcd, 0xf0, 0xea, 0x7e, 0x45, 0xd1, 0x6d, 0xb3, 0x5c, 0xb3, 0x6b, 0x76, 0x99, 0xbb, 0x55, + 0xfc, 0x1d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdc, 0xca, 0xb9, 0x38, 0xbc, 0xa9, 0xe9, 0x75, + 0xc3, 0x22, 0xee, 0x7e, 0xd9, 0xd9, 0xad, 0x31, 0x01, 0x2d, 0x9b, 0xc4, 0xd3, 0xca, 0xcd, 0xd3, + 0xdd, 0x87, 0x58, 0x29, 0x0f, 0xf2, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xe3, 0xf0, 0x7a, 0x9a, + 0x03, 0xd5, 0xeb, 0xc4, 0xd4, 0xba, 0xfd, 0xe4, 0x1f, 0x25, 0x58, 0xbd, 0xf2, 0x19, 0x31, 0x1d, + 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0xf6, 0xd7, 0x49, 0x93, 0x34, 0xd6, 0x6c, 0x6b, 0xc7, 0xa8, + 0xf9, 0xae, 0xe6, 0x19, 0xb6, 0x85, 0xee, 0x40, 0xc1, 0xb2, 0x4d, 0xc3, 0xd2, 0x98, 0x5c, 0xf7, + 0x5d, 0x97, 0x58, 0xfa, 0xfe, 0x76, 0x5d, 0x73, 0x09, 0x2d, 0x48, 0xab, 0xd2, 0x2b, 0x39, 0xf5, + 0xbf, 0xed, 0x56, 0xa9, 0xb0, 0x39, 0xc0, 0x06, 0x0f, 0xf4, 0x46, 0x6f, 0xc3, 0x7c, 0x83, 0x58, + 0x55, 0xad, 0xd2, 0x20, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x15, 0x32, 0x1c, 0x70, 0xb1, 0xdd, 0x2a, + 0xcd, 0xaf, 0x27, 0x55, 0xb8, 0xdb, 0x56, 0xbe, 0x0b, 0xcb, 0xef, 0x37, 0xec, 0xbd, 0xcb, 0x06, + 0xf5, 0x0c, 0xab, 0xe6, 0x1b, 0xb4, 0x4e, 0xdc, 0x0d, 0xe2, 0xd5, 0xed, 0x2a, 0x7a, 0x17, 0xb2, + 0xde, 0xbe, 0x43, 0xf8, 0xf9, 0xf2, 0xea, 0xf1, 0xc7, 0xad, 0xd2, 0x58, 0xbb, 0x55, 0xca, 0xde, + 0xda, 0x77, 0xc8, 0xf3, 0x56, 0xe9, 0xc8, 0x00, 0x37, 0xa6, 0xc6, 0xdc, 0x51, 0xfe, 0x3a, 0x03, + 0xc0, 0xac, 0xb6, 0x79, 0xe2, 0xd0, 0x7d, 0x98, 0x62, 0x8f, 0x55, 0xd5, 0x3c, 0x8d, 0x63, 0x4e, + 0x9f, 0x39, 0xa5, 0xc4, 0x95, 0x12, 0xe5, 0x5c, 0x71, 0x76, 0x6b, 0x4c, 0x40, 0x15, 0x66, 0xad, + 0x34, 0x4f, 0x2b, 0x37, 0x2a, 0x0f, 0x88, 0xee, 0x6d, 0x10, 0x4f, 0x53, 0x91, 0x38, 0x05, 0xc4, + 0x32, 0x1c, 0xa1, 0xa2, 0x2d, 0xc8, 0x52, 0x87, 0xe8, 0x3c, 0x01, 0xd3, 0x67, 0x14, 0x65, 0x78, + 0x1d, 0x2a, 0xf1, 0xd9, 
0xb6, 0x1d, 0xa2, 0xab, 0x33, 0xe1, 0x0d, 0xd9, 0x17, 0xe6, 0x48, 0xe8, + 0x0e, 0x4c, 0x50, 0x4f, 0xf3, 0x7c, 0x5a, 0x18, 0xef, 0x39, 0x71, 0x1a, 0x26, 0xf7, 0x53, 0xe7, + 0x04, 0xea, 0x44, 0xf0, 0x8d, 0x05, 0x9e, 0xfc, 0x34, 0x03, 0x8b, 0xb1, 0xf1, 0x9a, 0x6d, 0x55, + 0x0d, 0x5e, 0x29, 0x6f, 0x25, 0xb2, 0x7e, 0xac, 0x2b, 0xeb, 0xcb, 0x7d, 0x5c, 0xe2, 0x8c, 0xa3, + 0x37, 0xa2, 0xe3, 0x66, 0xb8, 0xfb, 0xd1, 0x64, 0xf0, 0xe7, 0xad, 0xd2, 0x7c, 0xe4, 0x96, 0x3c, + 0x0f, 0x6a, 0x02, 0x6a, 0x68, 0xd4, 0xbb, 0xe5, 0x6a, 0x16, 0x0d, 0x60, 0x0d, 0x93, 0x88, 0x5b, + 0xbf, 0x36, 0xda, 0x3b, 0x31, 0x0f, 0x75, 0x45, 0x84, 0x44, 0xeb, 0x3d, 0x68, 0xb8, 0x4f, 0x04, + 0xf4, 0x7f, 0x98, 0x70, 0x89, 0x46, 0x6d, 0xab, 0x90, 0xe5, 0x47, 0x8e, 0xf2, 0x85, 0xb9, 0x14, + 0x0b, 0x2d, 0x7a, 0x15, 0x26, 0x4d, 0x42, 0xa9, 0x56, 0x23, 0x85, 0x1c, 0x37, 0x9c, 0x17, 0x86, + 0x93, 0x1b, 0x81, 0x18, 0x87, 0x7a, 0xf9, 0x27, 0x09, 0xe6, 0xe2, 0x3c, 0xad, 0x1b, 0xd4, 0x43, + 0xf7, 0x7a, 0x6a, 0x4f, 0x19, 0xed, 0x4e, 0xcc, 0x9b, 0x57, 0xde, 0x82, 0x08, 0x37, 0x15, 0x4a, + 0x3a, 0xea, 0xee, 0x06, 0xe4, 0x0c, 0x8f, 0x98, 0x2c, 0xeb, 0xe3, 0x5d, 0xe9, 0x4a, 0x29, 0x12, + 0x75, 0x56, 0xc0, 0xe6, 0xae, 0x31, 0x00, 0x1c, 0xe0, 0xc8, 0x7f, 0x8c, 0x77, 0xde, 0x80, 0xd5, + 0x23, 0xfa, 0x4e, 0x82, 0x15, 0x67, 0xe0, 0x80, 0x11, 0x97, 0x5a, 0x4b, 0x8b, 0x3c, 0x78, 0x44, + 0x61, 0xb2, 0x43, 0xd8, 0x5c, 0x21, 0xaa, 0x2c, 0x8e, 0xb4, 0x32, 0xc4, 0x78, 0xc8, 0x51, 0xd0, + 0x87, 0x80, 0x4c, 0xcd, 0x63, 0x19, 0xad, 0x6d, 0xb9, 0x44, 0x27, 0x55, 0x86, 0x2a, 0x86, 0x52, + 0x54, 0x1d, 0x1b, 0x3d, 0x16, 0xb8, 0x8f, 0x17, 0xfa, 0x52, 0x82, 0xc5, 0x6a, 0xef, 0x90, 0x11, + 0x75, 0x79, 0x61, 0x94, 0x44, 0xf7, 0x99, 0x51, 0xea, 0x72, 0xbb, 0x55, 0x5a, 0xec, 0xa3, 0xc0, + 0xfd, 0x82, 0xa1, 0x7b, 0x90, 0x73, 0xfd, 0x06, 0xa1, 0x85, 0x2c, 0x7f, 0xde, 0xd4, 0xa8, 0x5b, + 0x76, 0xc3, 0xd0, 0xf7, 0x31, 0x73, 0xf9, 0xd8, 0xf0, 0xea, 0xdb, 0x3e, 0x9f, 0x55, 0x34, 0x7e, + 0x6b, 0xae, 0xc2, 0x01, 0xa8, 0xfc, 0x08, 0x16, 0xba, 0x87, 0x06, 0xaa, 0x01, 0xe8, 0x61, 0x9f, + 0xb2, 0x05, 0xc1, 0xc2, 0x9e, 0x1d, 0xbd, 0xaa, 0xa2, 0x1e, 0x8f, 0xe7, 0x65, 0x24, 0xa2, 0xb8, + 0x03, 0x5a, 0x3e, 0x05, 0x33, 0x57, 0x5d, 0xdb, 0x77, 0xc4, 0x19, 0xd1, 0x2a, 0x64, 0x2d, 0xcd, + 0x0c, 0xa7, 0x4f, 0x34, 0x11, 0x37, 0x35, 0x93, 0x60, 0xae, 0x91, 0xbf, 0x95, 0x60, 0x76, 0xdd, + 0x30, 0x0d, 0x0f, 0x13, 0xea, 0xd8, 0x16, 0x25, 0xe8, 0x7c, 0x62, 0x62, 0x1d, 0xed, 0x9a, 0x58, + 0x87, 0x12, 0xc6, 0x1d, 0xb3, 0xea, 0x13, 0x98, 0x7c, 0xe8, 0x13, 0xdf, 0xb0, 0x6a, 0x62, 0x5e, + 0x9f, 0x4b, 0xbb, 0xe0, 0xcd, 0xc0, 0x3c, 0x51, 0x6d, 0xea, 0x34, 0x1b, 0x01, 0x42, 0x83, 0x43, + 0x44, 0xf9, 0xef, 0x0c, 0x1c, 0xe5, 0x81, 0x49, 0x75, 0xc8, 0x56, 0xbe, 0x97, 0xba, 0x95, 0x57, + 0xc5, 0x6d, 0x0e, 0xb2, 0x99, 0x1f, 0xc0, 0x6c, 0xa3, 0xf3, 0xee, 0xe2, 0x9a, 0x27, 0xd3, 0xae, + 0x99, 0x48, 0x98, 0xba, 0x24, 0x4e, 0x90, 0x4c, 0x3a, 0x4e, 0x42, 0xf7, 0x63, 0x01, 0xe3, 0xa3, + 0xb3, 0x00, 0x74, 0x03, 0x96, 0x2a, 0xb6, 0xeb, 0xda, 0x7b, 0x86, 0x55, 0xe3, 0x71, 0x42, 0x90, + 0x2c, 0x07, 0xf9, 0x4f, 0xbb, 0x55, 0x5a, 0x52, 0xfb, 0x19, 0xe0, 0xfe, 0x7e, 0xf2, 0x1e, 0x2c, + 0x6d, 0xb2, 0x99, 0x42, 0x6d, 0xdf, 0xd5, 0x49, 0xdc, 0x10, 0xa8, 0x04, 0xb9, 0x26, 0x71, 0x2b, + 0x41, 0x51, 0xe7, 0xd5, 0x3c, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x07, 0x72, 0x76, 0x13, 0x2b, 0xf6, + 0xbc, 0x8d, 0xd7, 0x69, 0x61, 0x82, 0x9b, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8, 0xdb, 0x56, 0x6e, + 0x65, 0x60, 0x79, 0x40, 0xff, 0xa1, 0xdb, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0xa7, 0x8e, 0xa5, 0xbd, + 0x85, 0xf0, 0x8d, 0xa7, 0x7f, 0x08, 0x86, 0x23, 
0x28, 0x64, 0xc3, 0xac, 0x2b, 0x8e, 0xc0, 0x63, + 0x8a, 0x2d, 0x70, 0x26, 0x0d, 0xbb, 0x37, 0x3b, 0xf1, 0x63, 0xe3, 0x4e, 0x40, 0x9c, 0xc4, 0x47, + 0x8f, 0x60, 0xa1, 0xe3, 0xda, 0x41, 0xcc, 0x71, 0x1e, 0xf3, 0x7c, 0x5a, 0xcc, 0xbe, 0x8f, 0xa2, + 0x16, 0x44, 0xd8, 0x85, 0xcd, 0x2e, 0x58, 0xdc, 0x13, 0x48, 0xfe, 0x25, 0x03, 0x43, 0x16, 0xc3, + 0x4b, 0x20, 0x79, 0xf7, 0x13, 0x24, 0xef, 0x9d, 0x83, 0x6f, 0xbc, 0x81, 0xa4, 0xaf, 0xde, 0x45, + 0xfa, 0xde, 0x7b, 0x81, 0x18, 0xc3, 0x49, 0xe0, 0x9f, 0x19, 0xf8, 0xdf, 0x60, 0xe7, 0x98, 0x14, + 0x5e, 0x4f, 0x8c, 0xd8, 0x0b, 0x5d, 0x23, 0xf6, 0xd8, 0x08, 0x10, 0xff, 0x92, 0xc4, 0x2e, 0x92, + 0xf8, 0xab, 0x04, 0xc5, 0xc1, 0x79, 0x7b, 0x09, 0xa4, 0xf1, 0xd3, 0x24, 0x69, 0x7c, 0xf3, 0xe0, + 0x45, 0x36, 0x80, 0x44, 0x5e, 0x1d, 0x56, 0x5b, 0x11, 0xdd, 0x1b, 0x61, 0xe5, 0x7f, 0x9f, 0x19, + 0x96, 0x2a, 0xce, 0x4e, 0x53, 0x7e, 0xb5, 0x24, 0xbc, 0xaf, 0x58, 0x6c, 0xf5, 0x98, 0x6c, 0x7b, + 0x04, 0x05, 0x59, 0x87, 0xc9, 0x46, 0xb0, 0xab, 0x45, 0x53, 0x5f, 0x1a, 0x69, 0x45, 0x0e, 0x5b, + 0xed, 0x01, 0x2d, 0x10, 0x66, 0x38, 0x84, 0x47, 0x55, 0x98, 0x20, 0xfc, 0xa7, 0xfa, 0xa8, 0x9d, + 0x9d, 0xf6, 0xc3, 0x5e, 0x05, 0x56, 0x85, 0x81, 0x15, 0x16, 0xd8, 0xf2, 0x37, 0x12, 0xac, 0xa6, + 0x8d, 0x04, 0xb4, 0xd7, 0x87, 0xe2, 0xbd, 0x00, 0x7d, 0x1f, 0x9d, 0xf2, 0xfd, 0x20, 0xc1, 0xe1, + 0x7e, 0x4c, 0x8a, 0x35, 0x19, 0xa3, 0x4f, 0x11, 0xf7, 0x89, 0x9a, 0xec, 0x26, 0x97, 0x62, 0xa1, + 0x45, 0x27, 0x60, 0xaa, 0xae, 0x59, 0xd5, 0x6d, 0xe3, 0xf3, 0x90, 0xd5, 0x47, 0x65, 0xfe, 0x81, + 0x90, 0xe3, 0xc8, 0x02, 0x5d, 0x86, 0x05, 0xee, 0xb7, 0x4e, 0xac, 0x9a, 0x57, 0xe7, 0x2f, 0x22, + 0xa8, 0x49, 0xb4, 0x75, 0x6e, 0x76, 0xe9, 0x71, 0x8f, 0x87, 0xfc, 0x97, 0x04, 0xe8, 0x20, 0x6c, + 0xe2, 0x38, 0xe4, 0x35, 0xc7, 0xe0, 0x14, 0x37, 0x68, 0xb4, 0xbc, 0x3a, 0xdb, 0x6e, 0x95, 0xf2, + 0x97, 0xb6, 0xae, 0x05, 0x42, 0x1c, 0xeb, 0x99, 0x71, 0xb8, 0x68, 0x83, 0x85, 0x2a, 0x8c, 0xc3, + 0xc0, 0x14, 0xc7, 0x7a, 0x74, 0x11, 0x66, 0xf4, 0x86, 0x4f, 0x3d, 0xe2, 0x6e, 0xeb, 0xb6, 0x43, + 0xf8, 0x60, 0x9a, 0x52, 0x0f, 0x8b, 0x3b, 0xcd, 0xac, 0x75, 0xe8, 0x70, 0xc2, 0x12, 0x29, 0x00, + 0xac, 0xad, 0xa8, 0xa3, 0xb1, 0x38, 0x39, 0x1e, 0x67, 0x8e, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d, + 0x16, 0xf2, 0x03, 0x58, 0xda, 0x26, 0x6e, 0xd3, 0xd0, 0xc9, 0x25, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b, + 0xc9, 0x7a, 0x19, 0xf2, 0x91, 0x99, 0xe8, 0xbc, 0x43, 0x22, 0x7e, 0x3e, 0xc2, 0xc2, 0xb1, 0x4d, + 0xd4, 0xea, 0x99, 0x81, 0xad, 0xfe, 0x73, 0x06, 0x26, 0x63, 0xf8, 0xec, 0xae, 0x61, 0x55, 0x05, + 0xf2, 0x91, 0xd0, 0xfa, 0xba, 0x61, 0x55, 0x9f, 0xb7, 0x4a, 0xd3, 0xc2, 0x8c, 0x7d, 0x62, 0x6e, + 0x88, 0xae, 0x41, 0xd6, 0xa7, 0xc4, 0x15, 0x4d, 0x7c, 0x3c, 0xad, 0x98, 0x6f, 0x53, 0xe2, 0x86, + 0xfc, 0x6a, 0x8a, 0x21, 0x33, 0x01, 0xe6, 0x10, 0x68, 0x03, 0x72, 0x35, 0xf6, 0x28, 0xa2, 0x4f, + 0x4f, 0xa4, 0x61, 0x75, 0xfe, 0x88, 0x09, 0xca, 0x80, 0x4b, 0x70, 0x80, 0x82, 0x1e, 0xc2, 0x1c, + 0x4d, 0xa4, 0x90, 0x3f, 0xd7, 0x08, 0x7c, 0xa9, 0x6f, 0xe2, 0x55, 0xd4, 0x6e, 0x95, 0xe6, 0x92, + 0x2a, 0xdc, 0x15, 0x40, 0x2e, 0xc3, 0x74, 0xc7, 0x05, 0xd3, 0xa7, 0xac, 0x7a, 0xf9, 0xf1, 0xb3, + 0xe2, 0xd8, 0x93, 0x67, 0xc5, 0xb1, 0xa7, 0xcf, 0x8a, 0x63, 0x5f, 0xb4, 0x8b, 0xd2, 0xe3, 0x76, + 0x51, 0x7a, 0xd2, 0x2e, 0x4a, 0x4f, 0xdb, 0x45, 0xe9, 0xb7, 0x76, 0x51, 0xfa, 0xea, 0xf7, 0xe2, + 0xd8, 0xdd, 0xe2, 0xf0, 0xff, 0xc5, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xc5, 0x22, 0x46, + 0xc5, 0x15, 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.NominalConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", 
"FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + fmt.Sprintf("%v", this.NominalConcurrencyShares) + `,`, + `LimitResponse:` + 
strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + m.NominalConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NominalConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, 
err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto new file mode 100644 index 000000000..eda0f7829 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto @@ -0,0 +1,515 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta3; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/flowcontrol/v1beta3"; + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. 
+ // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. 
If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. 
+ // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of 30. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + optional int32 borrowingLimitPercent = 4; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. 
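The nominal/lendable/borrowing concurrency formulas documented in the comments above are easy to misread, so here is a small, self-contained Go sketch of how they combine. It is not part of the vendored API or of the apiserver's own controller; the serverCL value, the level names, and the helper names are illustrative assumptions only.

package main

import (
	"fmt"
	"math"
)

// nominalCLs applies NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ),
// dividing the server's concurrency limit among levels in proportion to
// their nominalConcurrencyShares.
func nominalCLs(serverCL int32, ncs map[string]int32) map[string]int32 {
	var sum int32
	for _, v := range ncs {
		sum += v
	}
	out := make(map[string]int32, len(ncs))
	for name, v := range ncs {
		out[name] = int32(math.Ceil(float64(serverCL) * float64(v) / float64(sum)))
	}
	return out
}

// lendableCL applies LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).
func lendableCL(nominalCL, lendablePercent int32) int32 {
	return int32(math.Round(float64(nominalCL) * float64(lendablePercent) / 100.0))
}

// borrowingCL applies BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ).
// A nil borrowingLimitPercent leaves the level without an explicit borrowing limit.
func borrowingCL(nominalCL int32, borrowingLimitPercent *int32) (limit int32, bounded bool) {
	if borrowingLimitPercent == nil {
		return 0, false
	}
	return int32(math.Round(float64(nominalCL) * float64(*borrowingLimitPercent) / 100.0)), true
}

func main() {
	// Hypothetical shares: the documented default of 30 for two Limited levels, 0 for an exempt level.
	limits := nominalCLs(600, map[string]int32{"workload-low": 30, "global-default": 30, "exempt": 0})
	fmt.Println(limits, lendableCL(limits["workload-low"], 50))
}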
+message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. 
+// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. 
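As context for the `queues`/`handSize` documentation above, the following is a rough, standalone sketch of the described queue-selection step: hash the flow identifier, deal a hand of handSize queues, and enqueue on the shortest queue in the hand. The real apiserver uses its own shuffle-sharding dealer and hash; the FNV hash, the pickQueue helper, and the queueLengths slice here are illustrative assumptions.

package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// pickQueue hashes the flow identifier, uses the hash to deal a "hand" of
// handSize distinct queue indices, and returns the index of the shortest
// queue in that hand, mirroring the behaviour described for QueuingConfiguration.
func pickQueue(flowIdentifier string, handSize int, queueLengths []int) int {
	h := fnv.New64a()
	h.Write([]byte(flowIdentifier))
	rng := rand.New(rand.NewSource(int64(h.Sum64())))

	// Deal the hand: the first handSize positions of a hash-seeded permutation.
	hand := rng.Perm(len(queueLengths))[:handSize]

	best := hand[0]
	for _, q := range hand[1:] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}

func main() {
	lengths := make([]int, 64) // queues: 64, the documented default
	lengths[7] = 10            // pretend queue 7 already has work queued
	fmt.Println(pickQueue("my-flow-schema/some-user", 8, lengths))
}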
A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + optional string kind = 1; + + // `user` matches based on username. + // +optional + optional UserSubject user = 2; + + // `group` matches based on user group name. + // +optional + optional GroupSubject group = 3; + + // `serviceAccount` matches ServiceAccounts. + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. 
+ optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/register.go b/vendor/k8s.io/api/flowcontrol/v1beta3/register.go new file mode 100644 index 000000000..baf10b182 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta3"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go new file mode 100644 index 000000000..810941557 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go @@ -0,0 +1,659 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. 
+const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". + // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. 
+ // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. 
+// +union +type Subject struct { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. 
+ // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. 
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. 
+ // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of 30. + // +optional + NominalConcurrencyShares int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
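// A worked example, not part of the vendored API, of the formulas documented
// above, assuming "math" is imported; the helper names are hypothetical.
// With ServerCL = 600, NCS = 30 and sum_ncs = 200: NominalCL = ceil(600*30/200) = 90;
// with lendablePercent = 50: LendableCL = round(90*0.50) = 45;
// with borrowingLimitPercent = 200: BorrowingCL = round(90*2.00) = 180.
func nominalCL(serverCL, ncs, sumNCS int32) int32 {
    return int32(math.Ceil(float64(serverCL) * float64(ncs) / float64(sumNCS)))
}

func lendableCL(nominal, lendablePercent int32) int32 {
    return int32(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

func borrowingCL(nominal, borrowingLimitPercent int32) int32 {
    return int32(math.Round(float64(nominal) * float64(borrowingLimitPercent) / 100.0))
}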
+ // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. 
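// A minimal sketch, not part of the vendored API, of how a dispatcher might
// branch on a LimitResponse when a request cannot be executed immediately;
// the enqueue/reject callbacks are hypothetical placeholders.
func handleOverload(lr LimitResponse, enqueue func(*QueuingConfiguration) bool, reject func()) {
    switch lr.Type {
    case LimitResponseTypeQueue:
        // Hold the request in a queue, subject to the queuing limits.
        if !enqueue(lr.Queuing) {
            reject() // queue length limit reached
        }
    case LimitResponseTypeReject:
        reject()
    }
}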
+const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. 
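// A simplified illustration, not the actual apiserver implementation, of the
// shuffle-sharding idea behind `queues` and `handSize`: the flow identifier
// (FlowSchema name plus distinguisher) is hashed, the hash deals a hand of
// handSize distinct queue indices, and the request then goes to the shortest
// queue in that hand. Assumes "hash/fnv" is imported; names are hypothetical.
func dealHand(flowSchemaName, flowDistinguisher string, queues, handSize int) []int {
    h := fnv.New64a()
    h.Write([]byte(flowSchemaName))
    h.Write([]byte{0}) // separator between the two strings of the flow identifier
    h.Write([]byte(flowDistinguisher))
    v := h.Sum64()

    // Deal handSize distinct indices out of [0, queues) without replacement.
    deck := make([]int, queues)
    for i := range deck {
        deck[i] = i
    }
    hand := make([]int, 0, handSize)
    for i := 0; i < handSize; i++ {
        remaining := queues - i
        j := int(v % uint64(remaining))
        v /= uint64(remaining)
        hand = append(hand, deck[j])
        deck[j] = deck[remaining-1]
    }
    return hand
}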
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go new file mode 100644 index 000000000..fa76112a7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. 
This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. 
Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. 
The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. 
At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. 
This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go new file mode 100644 index 000000000..09fefa20a --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go @@ -0,0 +1,583 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
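// An illustrative usage note, not part of the generated code: objects read
// from a shared informer cache or client should be deep-copied before being
// mutated, so that pointer-valued fields such as LendablePercent are not
// shared with the cached original. The helper name is hypothetical.
func withRaisedLendablePercent(orig *ExemptPriorityLevelConfiguration, pct int32) *ExemptPriorityLevelConfiguration {
    cp := orig.DeepCopy()     // allocates fresh copies of the pointer fields
    cp.LendablePercent = &pct // safe: does not alias orig.LendablePercent
    return cp
}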
+func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. 
+func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. 
+func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. 
+func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. 
+func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..24b761385 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,94 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta3 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
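// A minimal sketch, not part of the generated code, of how a caller might use
// the prerelease-lifecycle methods to warn before this API version stops being
// served; the helper name and the clusterMajor/clusterMinor inputs are
// hypothetical, and "fmt" is assumed to be imported.
func lifecycleWarning(obj interface {
    APILifecycleDeprecated() (int, int)
    APILifecycleRemoved() (int, int)
}, clusterMajor, clusterMinor int) string {
    if rMaj, rMin := obj.APILifecycleRemoved(); clusterMajor > rMaj || (clusterMajor == rMaj && clusterMinor >= rMin) {
        return fmt.Sprintf("API no longer served as of %d.%d; migrate off this version", rMaj, rMin)
    }
    if dMaj, dMin := obj.APILifecycleDeprecated(); clusterMajor > dMaj || (clusterMajor == dMaj && clusterMinor >= dMin) {
        return fmt.Sprintf("API deprecated since %d.%d", dMaj, dMin)
    }
    return ""
}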
+func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 631acbf93..daeaea5dc 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -328,10 +328,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{10} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{11} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{12} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} +func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{10} + return fileDescriptor_1c72867a70a7cc90, 
[]int{13} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +443,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{11} + return fileDescriptor_1c72867a70a7cc90, []int{14} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +471,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} } func (*IngressServiceBackend) ProtoMessage() {} func (*IngressServiceBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{12} + return fileDescriptor_1c72867a70a7cc90, []int{15} } func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +499,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{13} + return fileDescriptor_1c72867a70a7cc90, []int{16} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +527,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{14} + return fileDescriptor_1c72867a70a7cc90, []int{17} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +555,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{15} + return fileDescriptor_1c72867a70a7cc90, []int{18} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +583,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{16} + return fileDescriptor_1c72867a70a7cc90, []int{19} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +611,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{17} + return fileDescriptor_1c72867a70a7cc90, []int{20} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +639,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{18} + return fileDescriptor_1c72867a70a7cc90, []int{21} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-583,7 +667,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{19} + return fileDescriptor_1c72867a70a7cc90, []int{22} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +695,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{20} + return fileDescriptor_1c72867a70a7cc90, []int{23} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{21} + return fileDescriptor_1c72867a70a7cc90, []int{24} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{22} + return fileDescriptor_1c72867a70a7cc90, []int{25} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,38 +776,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } -func (*NetworkPolicyStatus) ProtoMessage() {} -func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{23} -} -func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyStatus.Merge(m, src) -} -func (m *NetworkPolicyStatus) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo - func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} } func (*ServiceBackendPort) ProtoMessage() {} func (*ServiceBackendPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{24} + return fileDescriptor_1c72867a70a7cc90, []int{26} } func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -759,6 +815,9 @@ func init() { proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1.IngressClassParametersReference") proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1.IngressClassSpec") proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), 
"k8s.io.api.networking.v1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1.IngressRuleValue") proto.RegisterType((*IngressServiceBackend)(nil), "k8s.io.api.networking.v1.IngressServiceBackend") @@ -772,7 +831,6 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") - proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.networking.v1.NetworkPolicyStatus") proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort") } @@ -781,107 +839,112 @@ func init() { } var fileDescriptor_1c72867a70a7cc90 = []byte{ - // 1589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0x3a, 0x71, 0xec, 0x3c, 0x27, 0x69, 0x3a, 0x6d, 0xf5, 0xb5, 0xfa, 0x15, 0x76, 0x58, - 0xd1, 0x36, 0x50, 0x6a, 0xd3, 0xb4, 0x42, 0x70, 0x01, 0xba, 0x69, 0x9b, 0x86, 0xa6, 0x89, 0x35, - 0x36, 0x45, 0x20, 0x40, 0xdd, 0xac, 0x27, 0xce, 0xd6, 0xeb, 0x9d, 0x65, 0x76, 0x1c, 0x5a, 0x4e, - 0x5c, 0x38, 0x70, 0xe3, 0xc6, 0x99, 0x3f, 0x01, 0xc1, 0x0d, 0x41, 0xc5, 0x05, 0xf5, 0x58, 0x89, - 0x4b, 0x2f, 0x58, 0xd4, 0xfc, 0x17, 0x39, 0xa1, 0x99, 0x9d, 0xfd, 0xe1, 0x75, 0xdc, 0xac, 0xaa, - 0x2a, 0xa7, 0x64, 0xdf, 0x8f, 0xcf, 0xfb, 0x39, 0xef, 0xcd, 0x18, 0xae, 0x75, 0xdf, 0xf1, 0x6b, - 0x36, 0xad, 0x77, 0xfb, 0x3b, 0x84, 0xb9, 0x84, 0x13, 0xbf, 0xbe, 0x4f, 0xdc, 0x36, 0x65, 0x75, - 0xc5, 0x30, 0x3d, 0xbb, 0xee, 0x12, 0xfe, 0x15, 0x65, 0x5d, 0xdb, 0xed, 0xd4, 0xf7, 0x2f, 0xd7, - 0x3b, 0xc4, 0x25, 0xcc, 0xe4, 0xa4, 0x5d, 0xf3, 0x18, 0xe5, 0x14, 0x95, 0x03, 0xc9, 0x9a, 0xe9, - 0xd9, 0xb5, 0x58, 0xb2, 0xb6, 0x7f, 0xf9, 0xec, 0xa5, 0x8e, 0xcd, 0xf7, 0xfa, 0x3b, 0x35, 0x8b, - 0xf6, 0xea, 0x1d, 0xda, 0xa1, 0x75, 0xa9, 0xb0, 0xd3, 0xdf, 0x95, 0x5f, 0xf2, 0x43, 0xfe, 0x17, - 0x00, 0x9d, 0xd5, 0x13, 0x26, 0x2d, 0xca, 0xc8, 0x21, 0xc6, 0xce, 0x5e, 0x8d, 0x65, 0x7a, 0xa6, - 0xb5, 0x67, 0xbb, 0x84, 0x3d, 0xac, 0x7b, 0xdd, 0x8e, 0x20, 0xf8, 0xf5, 0x1e, 0xe1, 0xe6, 0x61, - 0x5a, 0xf5, 0x49, 0x5a, 0xac, 0xef, 0x72, 0xbb, 0x47, 0xc6, 0x14, 0xde, 0x3e, 0x4a, 0xc1, 0xb7, - 0xf6, 0x48, 0xcf, 0x1c, 0xd3, 0xbb, 0x32, 0x49, 0xaf, 0xcf, 0x6d, 0xa7, 0x6e, 0xbb, 0xdc, 0xe7, - 0x2c, 0xad, 0xa4, 0xff, 0xae, 0xc1, 0x89, 0x5b, 0xad, 0x56, 0x63, 0xc3, 0xed, 0x30, 0xe2, 0xfb, - 0x0d, 0x93, 0xef, 0xa1, 0x65, 0x98, 0xf1, 0x4c, 0xbe, 0x57, 0xd6, 0x96, 0xb5, 0x95, 0x39, 0x63, - 0xfe, 0xf1, 0xa0, 0x3a, 0x35, 0x1c, 0x54, 0x67, 0x04, 0x0f, 0x4b, 0x0e, 0xba, 0x0a, 0x45, 0xf1, - 0xb7, 0xf5, 0xd0, 0x23, 0xe5, 0x69, 0x29, 0x55, 0x1e, 0x0e, 0xaa, 0xc5, 0x86, 0xa2, 0x1d, 0x24, - 0xfe, 0xc7, 0x91, 0x24, 0x6a, 0x42, 0x61, 0xc7, 0xb4, 0xba, 0xc4, 0x6d, 0x97, 0x73, 0xcb, 0xda, - 0x4a, 0x69, 0x75, 0xa5, 0x36, 0xa9, 0x7c, 0x35, 0xe5, 0x8f, 0x11, 0xc8, 0x1b, 0x27, 0x94, 0x13, - 0x05, 0x45, 0xc0, 0x21, 0x92, 0xbe, 0x0b, 0xa7, 0x13, 0xfe, 0xe3, 0xbe, 0x43, 0xee, 0x9a, 0x4e, - 0x9f, 0xa0, 0x2d, 0xc8, 0x0b, 0xc3, 0x7e, 0x59, 0x5b, 0x9e, 0x5e, 0x29, 0xad, 0xbe, 0x3e, 0xd9, - 0x54, 0x2a, 0x7c, 0x63, 0x41, 
0xd9, 0xca, 0x8b, 0x2f, 0x1f, 0x07, 0x30, 0xfa, 0x36, 0x14, 0x36, - 0x1a, 0x86, 0x43, 0xad, 0xae, 0xc8, 0x8f, 0x65, 0xb7, 0x59, 0x3a, 0x3f, 0x6b, 0x1b, 0xd7, 0x31, - 0x96, 0x1c, 0xa4, 0xc3, 0x2c, 0x79, 0x60, 0x11, 0x8f, 0x97, 0x73, 0xcb, 0xd3, 0x2b, 0x73, 0x06, - 0x0c, 0x07, 0xd5, 0xd9, 0x1b, 0x92, 0x82, 0x15, 0x47, 0xff, 0x36, 0x07, 0x05, 0x65, 0x16, 0xdd, - 0x83, 0xa2, 0x68, 0x9f, 0xb6, 0xc9, 0x4d, 0x89, 0x5a, 0x5a, 0x7d, 0x2b, 0xe1, 0x6f, 0x54, 0xcd, - 0x9a, 0xd7, 0xed, 0x08, 0x82, 0x5f, 0x13, 0xd2, 0xc2, 0xf7, 0xed, 0x9d, 0xfb, 0xc4, 0xe2, 0x77, - 0x08, 0x37, 0x0d, 0xa4, 0xfc, 0x80, 0x98, 0x86, 0x23, 0x54, 0xb4, 0x0e, 0x33, 0xbe, 0x47, 0x2c, - 0x95, 0xf8, 0x73, 0x47, 0x26, 0xbe, 0xe9, 0x11, 0x2b, 0x0e, 0x4d, 0x7c, 0x61, 0x09, 0x80, 0xb6, - 0x61, 0xd6, 0xe7, 0x26, 0xef, 0xfb, 0xb2, 0xf0, 0xa5, 0xd5, 0x0b, 0x47, 0x43, 0x49, 0x71, 0x63, - 0x51, 0x81, 0xcd, 0x06, 0xdf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0x38, 0x5a, 0x6d, 0x74, 0x17, - 0x0a, 0x3e, 0x61, 0xfb, 0xb6, 0x45, 0xca, 0x33, 0xd2, 0x48, 0xfd, 0x68, 0x23, 0x81, 0x7c, 0xd8, - 0x2f, 0x25, 0xd1, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x31, 0x14, 0x19, 0xf1, 0x69, 0x9f, 0x59, - 0x44, 0x79, 0x7f, 0x29, 0x09, 0x2c, 0xce, 0xbd, 0x80, 0x14, 0xcd, 0xda, 0xde, 0xa4, 0x96, 0xe9, - 0x04, 0xa9, 0xc4, 0x64, 0x97, 0x30, 0xe2, 0x5a, 0xc4, 0x98, 0x17, 0x5d, 0x8e, 0x15, 0x04, 0x8e, - 0xc0, 0xc4, 0x29, 0x9a, 0x57, 0x8e, 0xac, 0x39, 0xe6, 0xb1, 0x14, 0x74, 0x73, 0xa4, 0xa0, 0x6f, - 0x1c, 0x99, 0x20, 0xe9, 0xd7, 0xa4, 0xaa, 0xea, 0xbf, 0x69, 0xb0, 0x94, 0x14, 0xdc, 0xb4, 0x7d, - 0x8e, 0x3e, 0x1b, 0x0b, 0xa2, 0x96, 0x2d, 0x08, 0xa1, 0x2d, 0x43, 0x58, 0x52, 0xa6, 0x8a, 0x21, - 0x25, 0x11, 0xc0, 0x6d, 0xc8, 0xdb, 0x9c, 0xf4, 0x7c, 0x79, 0x44, 0x4a, 0xab, 0xe7, 0xb3, 0x45, - 0x10, 0x9f, 0xce, 0x0d, 0xa1, 0x8c, 0x03, 0x0c, 0xfd, 0x6f, 0x0d, 0xaa, 0x49, 0xb1, 0x86, 0xc9, - 0xcc, 0x1e, 0xe1, 0x84, 0xf9, 0x51, 0xf1, 0xd0, 0x0a, 0x14, 0xcd, 0xc6, 0xc6, 0x3a, 0xa3, 0x7d, - 0x2f, 0x3c, 0xba, 0xc2, 0xb5, 0x6b, 0x8a, 0x86, 0x23, 0xae, 0x38, 0xe0, 0x5d, 0x5b, 0x4d, 0xa9, - 0xc4, 0x01, 0xbf, 0x6d, 0xbb, 0x6d, 0x2c, 0x39, 0x42, 0xc2, 0x35, 0x7b, 0xe1, 0xf0, 0x8b, 0x24, - 0xb6, 0xcc, 0x1e, 0xc1, 0x92, 0x83, 0xaa, 0x90, 0xf7, 0x2d, 0xea, 0x05, 0x1d, 0x3c, 0x67, 0xcc, - 0x09, 0x97, 0x9b, 0x82, 0x80, 0x03, 0x3a, 0xba, 0x08, 0x73, 0x42, 0xd0, 0xf7, 0x4c, 0x8b, 0x94, - 0xf3, 0x52, 0x68, 0x61, 0x38, 0xa8, 0xce, 0x6d, 0x85, 0x44, 0x1c, 0xf3, 0xf5, 0x9f, 0x52, 0xf5, - 0x11, 0xa5, 0x43, 0xab, 0x00, 0x16, 0x75, 0x39, 0xa3, 0x8e, 0x43, 0xc2, 0x69, 0x14, 0x35, 0xcd, - 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0x64, 0x03, 0x78, 0x51, 0x6e, 0x54, 0xf3, 0xbc, 0x9b, 0x2d, 0xf5, - 0x87, 0xe4, 0xd4, 0x58, 0x14, 0xa6, 0x12, 0x8c, 0x04, 0xb8, 0xfe, 0xb3, 0x06, 0x25, 0xa5, 0x7f, - 0x0c, 0xed, 0x74, 0x73, 0xb4, 0x9d, 0x5e, 0x3d, 0x7a, 0xb5, 0x1c, 0xde, 0x49, 0x3f, 0xc6, 0x5e, - 0x8b, 0x65, 0x22, 0x2a, 0xbd, 0x47, 0x7d, 0x9e, 0x1e, 0xf6, 0xb7, 0xa8, 0xcf, 0xb1, 0xe4, 0x20, - 0x0f, 0x96, 0xec, 0xd4, 0xf6, 0xc9, 0x7c, 0x2a, 0x23, 0x0d, 0xa3, 0xac, 0x90, 0x97, 0xd2, 0x1c, - 0x3c, 0x86, 0xae, 0xdf, 0x83, 0x31, 0x29, 0x31, 0x0f, 0xf6, 0x38, 0xf7, 0x0e, 0xc9, 0xec, 0xe4, - 0x75, 0x17, 0x5b, 0x2f, 0xca, 0x98, 0x5a, 0xad, 0x06, 0x96, 0x28, 0xfa, 0x77, 0x1a, 0x9c, 0x39, - 0x74, 0xb2, 0x46, 0x9d, 0xaf, 0x4d, 0xec, 0xfc, 0x2d, 0x98, 0xf1, 0x28, 0xe3, 0x2a, 0x07, 0x6f, - 0x4e, 0xf6, 0x64, 0x14, 0xb9, 0x41, 0x19, 0x4f, 0x5c, 0x36, 0x28, 0xe3, 0x58, 0xe2, 0xe8, 0x7f, - 0xe6, 0xa2, 0x8a, 0xc8, 0xb6, 0xff, 0x20, 0xca, 0xb7, 0x6c, 0x4b, 0x61, 0x59, 0x1d, 0xb2, 0xd3, - 0x89, 0xfc, 0x45, 0x3c, 0x3c, 0x26, 0x8d, 0xda, 0xb0, 
0xd8, 0x26, 0xbb, 0x66, 0xdf, 0xe1, 0xca, - 0xb6, 0xca, 0x5a, 0xf6, 0xfb, 0x08, 0x1a, 0x0e, 0xaa, 0x8b, 0xd7, 0x47, 0x30, 0x70, 0x0a, 0x13, - 0xad, 0xc1, 0x34, 0x77, 0xc2, 0x7e, 0x7c, 0xed, 0x48, 0xe8, 0xd6, 0x66, 0xd3, 0x28, 0xa9, 0xf0, - 0xa7, 0x5b, 0x9b, 0x4d, 0x2c, 0xb4, 0xd1, 0x87, 0x90, 0x67, 0x7d, 0x87, 0x88, 0x6d, 0x3b, 0x9d, - 0x69, 0x71, 0x8b, 0x9a, 0xc6, 0xad, 0x2d, 0xbe, 0x7c, 0x1c, 0x40, 0xe8, 0x5f, 0xc2, 0xc2, 0xc8, - 0x4a, 0x46, 0xf7, 0x60, 0xde, 0xa1, 0x66, 0xdb, 0x30, 0x1d, 0xd3, 0xb5, 0xd4, 0x08, 0x49, 0x4d, - 0xe2, 0x70, 0x27, 0x6e, 0x26, 0xe4, 0xd4, 0x42, 0x3f, 0xad, 0x8c, 0xcc, 0x27, 0x79, 0x78, 0x04, - 0x51, 0x37, 0x01, 0xe2, 0xf0, 0xc4, 0x4c, 0x14, 0x27, 0x26, 0xb8, 0x93, 0xa9, 0x99, 0x28, 0x0e, - 0x92, 0x8f, 0x03, 0xba, 0x98, 0x68, 0x3e, 0xb1, 0x18, 0xe1, 0xb2, 0xa8, 0xb9, 0xd1, 0x89, 0xd6, - 0x8c, 0x38, 0x38, 0x21, 0xa5, 0xff, 0x90, 0x83, 0x85, 0xad, 0x20, 0x13, 0x0d, 0xea, 0xd8, 0xd6, - 0xc3, 0x63, 0x58, 0xbe, 0x77, 0x46, 0x96, 0xef, 0xc5, 0xc9, 0x45, 0x19, 0x71, 0x6c, 0xe2, 0x9d, - 0xea, 0xa3, 0xd4, 0x9d, 0xea, 0x52, 0x56, 0xc0, 0xe7, 0xdf, 0xac, 0x7e, 0xd1, 0xe0, 0x7f, 0x23, - 0xf2, 0x37, 0xe2, 0xb1, 0xd6, 0x80, 0xbc, 0x38, 0x5c, 0xe1, 0xf5, 0x38, 0x6b, 0x08, 0xf2, 0x90, - 0xc6, 0x17, 0x64, 0x81, 0x80, 0x03, 0x20, 0xb4, 0x0e, 0x39, 0x4e, 0x55, 0xb7, 0x67, 0x86, 0x23, - 0x84, 0x19, 0xa0, 0xe0, 0x72, 0x2d, 0x8a, 0x73, 0x9c, 0xea, 0xbf, 0x6a, 0x50, 0x1e, 0x91, 0x4a, - 0x8e, 0xe3, 0x97, 0xef, 0xf7, 0x1d, 0x98, 0xd9, 0x65, 0xb4, 0xf7, 0x22, 0x9e, 0x47, 0xb5, 0xbc, - 0xc9, 0x68, 0x0f, 0x4b, 0x18, 0xfd, 0x91, 0x06, 0x27, 0x47, 0x24, 0x8f, 0x61, 0xf7, 0x6d, 0x8e, - 0xee, 0xbe, 0x0b, 0x19, 0x63, 0x98, 0xb0, 0x01, 0x1f, 0xe5, 0x52, 0x11, 0x88, 0x58, 0xd1, 0x2e, - 0x94, 0x3c, 0xda, 0x6e, 0x12, 0x87, 0x58, 0x9c, 0x86, 0xa3, 0xe2, 0x4a, 0xc6, 0x20, 0xcc, 0x1d, - 0xe2, 0x84, 0xaa, 0xc6, 0x89, 0xe1, 0xa0, 0x5a, 0x6a, 0xc4, 0x58, 0x38, 0x09, 0x8c, 0x1e, 0xc0, - 0xc9, 0xe8, 0xda, 0x13, 0x59, 0xcb, 0xbd, 0xb8, 0xb5, 0x33, 0xc3, 0x41, 0xf5, 0xe4, 0x56, 0x1a, - 0x11, 0x8f, 0x1b, 0x41, 0xb7, 0xa0, 0x60, 0x7b, 0xf2, 0x85, 0xa7, 0x8e, 0xe1, 0xf3, 0xee, 0x10, - 0xc1, 0x53, 0x30, 0x78, 0x67, 0xa8, 0x0f, 0x1c, 0xaa, 0xeb, 0x7f, 0xa5, 0x7b, 0x40, 0x34, 0x1c, - 0x5a, 0x87, 0xa2, 0x7c, 0x73, 0x5b, 0xd4, 0x51, 0xdb, 0xf3, 0xa2, 0x7c, 0x34, 0x2b, 0xda, 0xc1, - 0xa0, 0xfa, 0xff, 0xf1, 0x1f, 0x21, 0x6a, 0x21, 0x1b, 0x47, 0xca, 0xa9, 0x05, 0x3b, 0x79, 0xb6, - 0x89, 0x77, 0x7f, 0x2d, 0x78, 0xf7, 0xd7, 0x36, 0x5c, 0xbe, 0xcd, 0x9a, 0x9c, 0xd9, 0x6e, 0x27, - 0x58, 0xf6, 0xf1, 0x82, 0x45, 0xe7, 0xa0, 0xa0, 0xf6, 0xaf, 0x0c, 0x3c, 0x1f, 0x44, 0x75, 0x23, - 0x20, 0xe1, 0x90, 0xa7, 0x1f, 0xa4, 0xfb, 0x42, 0x6e, 0xe3, 0xfb, 0x2f, 0xad, 0x2f, 0x4e, 0xa9, - 0x6e, 0x9c, 0xdc, 0x1b, 0x9f, 0x43, 0x41, 0xed, 0x72, 0xd5, 0xe9, 0xab, 0x19, 0x3b, 0x3d, 0xb9, - 0x1b, 0xa3, 0x9f, 0x12, 0x42, 0x62, 0x88, 0x89, 0x3e, 0x81, 0x59, 0x12, 0xa0, 0x07, 0xcb, 0xf6, - 0x72, 0x46, 0xf4, 0x78, 0xac, 0xc6, 0xa3, 0x58, 0xd1, 0x14, 0x20, 0x7a, 0x5f, 0x64, 0x49, 0xc8, - 0x8a, 0xb7, 0xa5, 0x5f, 0x9e, 0x91, 0xfb, 0xef, 0x95, 0x20, 0xd8, 0x88, 0x7c, 0x20, 0xee, 0xd2, - 0xd1, 0x27, 0x4e, 0x6a, 0xe8, 0x5f, 0xc3, 0xa9, 0x43, 0x46, 0x3f, 0xb2, 0xe4, 0x13, 0xa0, 0x6d, - 0x73, 0x9b, 0xba, 0xe1, 0x4c, 0xac, 0x67, 0x4b, 0xfe, 0x5a, 0xa8, 0x37, 0xf2, 0x66, 0x50, 0x50, - 0x38, 0x01, 0xab, 0x7f, 0x01, 0x68, 0xfc, 0xaa, 0x96, 0xe1, 0x22, 0x78, 0x1e, 0x66, 0xdd, 0x7e, - 0x6f, 0x87, 0x04, 0xe7, 0x37, 0x1f, 0x27, 0x67, 0x4b, 0x52, 0xb1, 0xe2, 0x1a, 0xef, 0x3d, 0x7e, - 0x56, 0x99, 0x7a, 0xf2, 0xac, 0x32, 0xf5, 0xf4, 0x59, 0x65, 0xea, 0x9b, 0x61, 
0x45, 0x7b, 0x3c, - 0xac, 0x68, 0x4f, 0x86, 0x15, 0xed, 0xe9, 0xb0, 0xa2, 0xfd, 0x33, 0xac, 0x68, 0xdf, 0xff, 0x5b, - 0x99, 0xfa, 0xb4, 0x3c, 0xe9, 0x47, 0xc1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x43, 0xa0, - 0xa5, 0x48, 0x14, 0x00, 0x00, + // 1671 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xd5, + 0x1a, 0xcf, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x6e, 0xab, 0xeb, 0x9b, 0xab, 0x6b, 0xe7, + 0x8e, 0x68, 0x1b, 0x68, 0x6b, 0xd3, 0xb4, 0x42, 0xb0, 0x01, 0x3a, 0x69, 0x9a, 0x86, 0xa6, 0x8e, + 0x75, 0x6c, 0x15, 0x81, 0x78, 0x74, 0x32, 0x3e, 0xb1, 0xa7, 0x1e, 0xcf, 0x19, 0x9d, 0x39, 0x0e, + 0xad, 0x84, 0x10, 0x1b, 0x16, 0xec, 0xf8, 0x17, 0x10, 0x7f, 0x01, 0x82, 0x05, 0x12, 0x82, 0xc2, + 0x06, 0x75, 0x59, 0x89, 0x4d, 0x37, 0x58, 0xd4, 0xfc, 0x17, 0x59, 0xa1, 0xf3, 0x98, 0x97, 0x1f, + 0xb5, 0xa9, 0xaa, 0xac, 0x92, 0xf3, 0x7d, 0xdf, 0xf9, 0x7d, 0x8f, 0xf3, 0xbd, 0xc6, 0xe0, 0x5a, + 0xfb, 0x75, 0xbf, 0x64, 0x93, 0x72, 0xbb, 0x7b, 0x80, 0xa9, 0x8b, 0x19, 0xf6, 0xcb, 0x47, 0xd8, + 0x6d, 0x10, 0x5a, 0x56, 0x0c, 0xd3, 0xb3, 0xcb, 0x2e, 0x66, 0x9f, 0x10, 0xda, 0xb6, 0xdd, 0x66, + 0xf9, 0xe8, 0x72, 0xb9, 0x89, 0x5d, 0x4c, 0x4d, 0x86, 0x1b, 0x25, 0x8f, 0x12, 0x46, 0x60, 0x5e, + 0x4a, 0x96, 0x4c, 0xcf, 0x2e, 0x45, 0x92, 0xa5, 0xa3, 0xcb, 0x6b, 0x97, 0x9a, 0x36, 0x6b, 0x75, + 0x0f, 0x4a, 0x16, 0xe9, 0x94, 0x9b, 0xa4, 0x49, 0xca, 0xe2, 0xc2, 0x41, 0xf7, 0x50, 0x9c, 0xc4, + 0x41, 0xfc, 0x27, 0x81, 0xd6, 0xf4, 0x98, 0x4a, 0x8b, 0x50, 0x3c, 0x42, 0xd9, 0xda, 0xd5, 0x48, + 0xa6, 0x63, 0x5a, 0x2d, 0xdb, 0xc5, 0xf4, 0x41, 0xd9, 0x6b, 0x37, 0x39, 0xc1, 0x2f, 0x77, 0x30, + 0x33, 0x47, 0xdd, 0x2a, 0x8f, 0xbb, 0x45, 0xbb, 0x2e, 0xb3, 0x3b, 0x78, 0xe8, 0xc2, 0x6b, 0x93, + 0x2e, 0xf8, 0x56, 0x0b, 0x77, 0xcc, 0xa1, 0x7b, 0x57, 0xc6, 0xdd, 0xeb, 0x32, 0xdb, 0x29, 0xdb, + 0x2e, 0xf3, 0x19, 0x1d, 0xbc, 0xa4, 0xff, 0xac, 0x81, 0x53, 0x37, 0xeb, 0xf5, 0xea, 0xae, 0xdb, + 0xa4, 0xd8, 0xf7, 0xab, 0x26, 0x6b, 0xc1, 0x75, 0x30, 0xe7, 0x99, 0xac, 0x95, 0xd7, 0xd6, 0xb5, + 0x8d, 0x05, 0x63, 0xf1, 0x51, 0xaf, 0x38, 0xd3, 0xef, 0x15, 0xe7, 0x38, 0x0f, 0x09, 0x0e, 0xbc, + 0x0a, 0xb2, 0xfc, 0x6f, 0xfd, 0x81, 0x87, 0xf3, 0xb3, 0x42, 0x2a, 0xdf, 0xef, 0x15, 0xb3, 0x55, + 0x45, 0x3b, 0x8e, 0xfd, 0x8f, 0x42, 0x49, 0x58, 0x03, 0x99, 0x03, 0xd3, 0x6a, 0x63, 0xb7, 0x91, + 0x4f, 0xad, 0x6b, 0x1b, 0xb9, 0xcd, 0x8d, 0xd2, 0xb8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, + 0x9c, 0x52, 0x46, 0x64, 0x14, 0x01, 0x05, 0x48, 0xfa, 0x21, 0x38, 0x1d, 0xb3, 0x1f, 0x75, 0x1d, + 0x7c, 0xc7, 0x74, 0xba, 0x18, 0x56, 0x40, 0x9a, 0x2b, 0xf6, 0xf3, 0xda, 0xfa, 0xec, 0x46, 0x6e, + 0xf3, 0xe5, 0xf1, 0xaa, 0x06, 0xdc, 0x37, 0x96, 0x94, 0xae, 0x34, 0x3f, 0xf9, 0x48, 0xc2, 0xe8, + 0xfb, 0x20, 0xb3, 0x5b, 0x35, 0x1c, 0x62, 0xb5, 0x79, 0x7c, 0x2c, 0xbb, 0x41, 0x07, 0xe3, 0xb3, + 0xb5, 0x7b, 0x1d, 0x21, 0xc1, 0x81, 0x3a, 0x98, 0xc7, 0xf7, 0x2d, 0xec, 0xb1, 0x7c, 0x6a, 0x7d, + 0x76, 0x63, 0xc1, 0x00, 0xfd, 0x5e, 0x71, 0x7e, 0x5b, 0x50, 0x90, 0xe2, 0xe8, 0x5f, 0xa4, 0x40, + 0x46, 0xa9, 0x85, 0x77, 0x41, 0x96, 0xa7, 0x4f, 0xc3, 0x64, 0xa6, 0x40, 0xcd, 0x6d, 0xbe, 0x1a, + 0xb3, 0x37, 0x7c, 0xcd, 0x92, 0xd7, 0x6e, 0x72, 0x82, 0x5f, 0xe2, 0xd2, 0xdc, 0xf6, 0xfd, 0x83, + 0x7b, 0xd8, 0x62, 0xb7, 0x31, 0x33, 0x0d, 0xa8, 0xec, 0x00, 0x11, 0x0d, 0x85, 0xa8, 0x70, 0x07, + 0xcc, 0xf9, 0x1e, 0xb6, 0x54, 0xe0, 0xcf, 0x4e, 0x0c, 0x7c, 0xcd, 0xc3, 0x56, 0xe4, 0x1a, 0x3f, + 0x21, 0x01, 0x00, 0xf7, 0xc1, 0xbc, 0xcf, 0x4c, 0xd6, 0xf5, 0xc5, 0xc3, 0xe7, 0x36, 0xcf, 0x4f, + 0x86, 0x12, 0xe2, 0xc6, 
0xb2, 0x02, 0x9b, 0x97, 0x67, 0xa4, 0x60, 0xf4, 0x5f, 0x35, 0xb0, 0x9c, + 0x7c, 0x6d, 0x78, 0x07, 0x64, 0x7c, 0x4c, 0x8f, 0x6c, 0x0b, 0xe7, 0xe7, 0x84, 0x92, 0xf2, 0x64, + 0x25, 0x52, 0x3e, 0xc8, 0x97, 0x1c, 0xcf, 0x15, 0x45, 0x43, 0x01, 0x18, 0x7c, 0x17, 0x64, 0x29, + 0xf6, 0x49, 0x97, 0x5a, 0x58, 0x59, 0x7f, 0x29, 0x0e, 0xcc, 0xeb, 0x9e, 0x43, 0xf2, 0x64, 0x6d, + 0xec, 0x11, 0xcb, 0x74, 0x64, 0x28, 0x11, 0x3e, 0xc4, 0x14, 0xbb, 0x16, 0x36, 0x16, 0x79, 0x96, + 0x23, 0x05, 0x81, 0x42, 0x30, 0x5e, 0x45, 0x8b, 0xca, 0x90, 0x2d, 0xc7, 0x3c, 0x91, 0x07, 0xdd, + 0x4b, 0x3c, 0xe8, 0x2b, 0x13, 0x03, 0x24, 0xec, 0x1a, 0xf7, 0xaa, 0xfa, 0x4f, 0x1a, 0x58, 0x89, + 0x0b, 0xee, 0xd9, 0x3e, 0x83, 0x1f, 0x0c, 0x39, 0x51, 0x9a, 0xce, 0x09, 0x7e, 0x5b, 0xb8, 0xb0, + 0xa2, 0x54, 0x65, 0x03, 0x4a, 0xcc, 0x81, 0x5b, 0x20, 0x6d, 0x33, 0xdc, 0xf1, 0x45, 0x89, 0xe4, + 0x36, 0xcf, 0x4d, 0xe7, 0x41, 0x54, 0x9d, 0xbb, 0xfc, 0x32, 0x92, 0x18, 0xfa, 0x1f, 0x1a, 0x28, + 0xc6, 0xc5, 0xaa, 0x26, 0x35, 0x3b, 0x98, 0x61, 0xea, 0x87, 0x8f, 0x07, 0x37, 0x40, 0xd6, 0xac, + 0xee, 0xee, 0x50, 0xd2, 0xf5, 0x82, 0xd2, 0xe5, 0xa6, 0x5d, 0x53, 0x34, 0x14, 0x72, 0x79, 0x81, + 0xb7, 0x6d, 0xd5, 0xa5, 0x62, 0x05, 0x7e, 0xcb, 0x76, 0x1b, 0x48, 0x70, 0xb8, 0x84, 0x6b, 0x76, + 0x82, 0xe6, 0x17, 0x4a, 0x54, 0xcc, 0x0e, 0x46, 0x82, 0x03, 0x8b, 0x20, 0xed, 0x5b, 0xc4, 0x93, + 0x19, 0xbc, 0x60, 0x2c, 0x70, 0x93, 0x6b, 0x9c, 0x80, 0x24, 0x1d, 0x5e, 0x00, 0x0b, 0x5c, 0xd0, + 0xf7, 0x4c, 0x0b, 0xe7, 0xd3, 0x42, 0x68, 0xa9, 0xdf, 0x2b, 0x2e, 0x54, 0x02, 0x22, 0x8a, 0xf8, + 0xfa, 0xb7, 0x03, 0xef, 0xc3, 0x9f, 0x0e, 0x6e, 0x02, 0x60, 0x11, 0x97, 0x51, 0xe2, 0x38, 0x38, + 0xe8, 0x46, 0x61, 0xd2, 0x6c, 0x85, 0x1c, 0x14, 0x93, 0x82, 0x36, 0x00, 0x5e, 0x18, 0x1b, 0x95, + 0x3c, 0x6f, 0x4c, 0x17, 0xfa, 0x11, 0x31, 0x35, 0x96, 0xb9, 0xaa, 0x18, 0x23, 0x06, 0xae, 0x7f, + 0xa7, 0x81, 0x9c, 0xba, 0x7f, 0x02, 0xe9, 0x74, 0x23, 0x99, 0x4e, 0xff, 0x9f, 0x3c, 0x5a, 0x46, + 0x67, 0xd2, 0x0f, 0x1a, 0x58, 0x0b, 0xac, 0x26, 0x66, 0xc3, 0x30, 0x1d, 0xd3, 0xb5, 0x30, 0x0d, + 0x3a, 0xf5, 0x1a, 0x48, 0xd9, 0x41, 0xfa, 0x00, 0x05, 0x90, 0xda, 0xad, 0xa2, 0x94, 0xed, 0xc1, + 0x8b, 0x20, 0xdb, 0x22, 0x3e, 0x13, 0x89, 0x21, 0x53, 0x27, 0x34, 0xf8, 0xa6, 0xa2, 0xa3, 0x50, + 0x02, 0x56, 0x41, 0xda, 0x23, 0x94, 0xf9, 0xf9, 0x39, 0x61, 0xf0, 0x85, 0x89, 0x06, 0x57, 0x09, + 0x65, 0xaa, 0x97, 0x46, 0x23, 0x8a, 0x23, 0x20, 0x09, 0xa4, 0x7f, 0x0a, 0xfe, 0x33, 0xc2, 0x72, + 0x79, 0x05, 0x7e, 0x0c, 0x32, 0xb6, 0x64, 0xaa, 0x89, 0x78, 0x75, 0xa2, 0xc2, 0x11, 0xfe, 0x47, + 0x83, 0x38, 0x18, 0xb8, 0x01, 0xaa, 0xfe, 0x8d, 0x06, 0x56, 0x87, 0x2c, 0x15, 0xbb, 0x04, 0xa1, + 0x4c, 0x44, 0x2c, 0x1d, 0xdb, 0x25, 0x08, 0x65, 0x48, 0x70, 0xe0, 0x2d, 0x90, 0x15, 0xab, 0x88, + 0x45, 0x1c, 0x15, 0xb5, 0x72, 0x10, 0xb5, 0xaa, 0xa2, 0x1f, 0xf7, 0x8a, 0xff, 0x1d, 0xde, 0xcf, + 0x4a, 0x01, 0x1b, 0x85, 0x00, 0xbc, 0xea, 0x30, 0xa5, 0x84, 0xaa, 0xc2, 0x14, 0x55, 0xb7, 0xcd, + 0x09, 0x48, 0xd2, 0xf5, 0xaf, 0xa3, 0xa4, 0xe4, 0xbb, 0x02, 0xb7, 0x8f, 0xbf, 0xc8, 0xe0, 0x2c, + 0xe7, 0xef, 0x85, 0x04, 0x07, 0x7a, 0x60, 0xc5, 0x1e, 0x58, 0x2e, 0xa6, 0x6e, 0xba, 0xe1, 0x0d, + 0x23, 0xaf, 0x90, 0x57, 0x06, 0x39, 0x68, 0x08, 0x5d, 0xbf, 0x0b, 0x86, 0xa4, 0x78, 0xbb, 0x6f, + 0x31, 0xe6, 0x8d, 0x28, 0x9c, 0xf1, 0xdb, 0x4c, 0xa4, 0x3d, 0x2b, 0x7c, 0xaa, 0xd7, 0xab, 0x48, + 0xa0, 0xe8, 0x5f, 0x6a, 0xe0, 0xcc, 0xc8, 0xc1, 0x19, 0x36, 0x36, 0x6d, 0x6c, 0x63, 0xab, 0xa8, + 0x17, 0x95, 0x31, 0xb8, 0x38, 0xde, 0x92, 0x24, 0x32, 0x7f, 0xf1, 0x51, 0xef, 0xaf, 0xff, 0x96, + 0x0a, 0x5f, 0x44, 0x74, 0xb5, 0xb7, 0xc3, 0x78, 
0x8b, 0xae, 0xc3, 0x35, 0xab, 0x1e, 0x7a, 0x3a, + 0x16, 0xbf, 0x90, 0x87, 0x86, 0xa4, 0x61, 0x03, 0x2c, 0x37, 0xf0, 0xa1, 0xd9, 0x75, 0x98, 0xd2, + 0xad, 0xa2, 0x36, 0xfd, 0xba, 0x09, 0xfb, 0xbd, 0xe2, 0xf2, 0xf5, 0x04, 0x06, 0x1a, 0xc0, 0x84, + 0x5b, 0x60, 0x96, 0x39, 0x41, 0xbb, 0x79, 0x69, 0x22, 0x74, 0x7d, 0xaf, 0x66, 0xe4, 0x94, 0xfb, + 0xb3, 0xf5, 0xbd, 0x1a, 0xe2, 0xb7, 0xe1, 0x3b, 0x20, 0x4d, 0xbb, 0x0e, 0xe6, 0xcb, 0xd4, 0xec, + 0x54, 0x7b, 0x19, 0x7f, 0xd3, 0xa8, 0xfc, 0xf9, 0xc9, 0x47, 0x12, 0x42, 0xff, 0x0c, 0x2c, 0x25, + 0x36, 0x2e, 0xd8, 0x01, 0x8b, 0x4e, 0xac, 0x84, 0x55, 0x14, 0xae, 0xfc, 0xa3, 0xba, 0x57, 0x0d, + 0xe7, 0xb4, 0xd2, 0xb8, 0x18, 0xe7, 0xa1, 0x04, 0xbc, 0x6e, 0x02, 0x10, 0xf9, 0xca, 0x2b, 0x91, + 0x97, 0x8f, 0xec, 0x36, 0xaa, 0x12, 0x79, 0x55, 0xf9, 0x48, 0xd2, 0xf9, 0xf4, 0xf2, 0xb1, 0x45, + 0x31, 0xab, 0x44, 0xfd, 0x32, 0x9c, 0x5e, 0xb5, 0x90, 0x83, 0x62, 0x52, 0xfa, 0x2f, 0x1a, 0x58, + 0xaa, 0x48, 0x93, 0xab, 0xc4, 0xb1, 0xad, 0x07, 0x27, 0xb0, 0x68, 0xdd, 0x4e, 0x2c, 0x5a, 0xcf, + 0x68, 0xd3, 0x09, 0xc3, 0xc6, 0x6e, 0x5a, 0xdf, 0x6b, 0xe0, 0xdf, 0x09, 0xc9, 0xed, 0xa8, 0x19, + 0x85, 0x23, 0x41, 0x9b, 0x34, 0x12, 0x12, 0x08, 0xa2, 0xb4, 0x46, 0x8e, 0x04, 0xb8, 0x03, 0x52, + 0x8c, 0xa8, 0x1c, 0x9d, 0x1a, 0x0e, 0x63, 0x1a, 0xcd, 0xb6, 0x3a, 0x41, 0x29, 0x46, 0xf4, 0x1f, + 0x35, 0x90, 0x4f, 0x48, 0xc5, 0x9b, 0xe8, 0x8b, 0xb7, 0xfb, 0x36, 0x98, 0x3b, 0xa4, 0xa4, 0xf3, + 0x3c, 0x96, 0x87, 0x41, 0xbf, 0x41, 0x49, 0x07, 0x09, 0x18, 0xfd, 0xa1, 0x06, 0x56, 0x13, 0x92, + 0x27, 0xb0, 0x90, 0xec, 0x25, 0x17, 0x92, 0xf3, 0x53, 0xfa, 0x30, 0x66, 0x2d, 0x79, 0x98, 0x1a, + 0xf0, 0x80, 0xfb, 0x0a, 0x0f, 0x41, 0xce, 0x23, 0x8d, 0x1a, 0x76, 0xb0, 0xc5, 0xc8, 0xa8, 0x02, + 0x7f, 0x96, 0x13, 0xe6, 0x01, 0x76, 0x82, 0xab, 0xc6, 0xa9, 0x7e, 0xaf, 0x98, 0xab, 0x46, 0x58, + 0x28, 0x0e, 0x0c, 0xef, 0x83, 0xd5, 0x70, 0x17, 0x0d, 0xb5, 0xa5, 0x9e, 0x5f, 0xdb, 0x99, 0x7e, + 0xaf, 0xb8, 0x5a, 0x19, 0x44, 0x44, 0xc3, 0x4a, 0xe0, 0x4d, 0x90, 0xb1, 0x3d, 0xf1, 0xd9, 0xad, + 0xbe, 0xd8, 0x9e, 0xb5, 0xd8, 0xc9, 0xef, 0x73, 0xf9, 0xf1, 0xa7, 0x0e, 0x28, 0xb8, 0xae, 0xff, + 0x3e, 0x98, 0x03, 0x3c, 0xe1, 0xe0, 0x4e, 0x6c, 0xfb, 0x90, 0x33, 0xef, 0xc2, 0xf3, 0x6d, 0x1e, + 0xc9, 0xb1, 0x38, 0xbe, 0x09, 0x75, 0x99, 0xed, 0x94, 0xe4, 0x8f, 0x31, 0xa5, 0x5d, 0x97, 0xed, + 0xd3, 0x1a, 0xa3, 0xb6, 0xdb, 0x94, 0x23, 0x3a, 0xb6, 0x16, 0x9d, 0x05, 0x19, 0x35, 0x35, 0x85, + 0xe3, 0x69, 0xe9, 0xd5, 0xb6, 0x24, 0xa1, 0x80, 0xa7, 0x1f, 0x0f, 0xe6, 0x85, 0x98, 0xa1, 0xf7, + 0x5e, 0x58, 0x5e, 0xfc, 0x4b, 0x65, 0xe3, 0xf8, 0xdc, 0xf8, 0x30, 0x5a, 0x2c, 0x65, 0xa6, 0x6f, + 0x4e, 0x99, 0xe9, 0xf1, 0x89, 0x36, 0x76, 0xad, 0x84, 0xef, 0x81, 0x79, 0x2c, 0xd1, 0xe5, 0x88, + 0xbc, 0x3c, 0x25, 0x7a, 0xd4, 0x56, 0xa3, 0x5f, 0x1e, 0x14, 0x4d, 0x01, 0xc2, 0xb7, 0x78, 0x94, + 0xb8, 0x2c, 0xff, 0xe0, 0x97, 0x7b, 0xf8, 0x82, 0xf1, 0x3f, 0xe9, 0x6c, 0x48, 0x3e, 0xe6, 0x1f, + 0x38, 0xe1, 0x11, 0xc5, 0x6f, 0xe8, 0x1f, 0x01, 0x38, 0xbc, 0xe4, 0x4c, 0xb1, 0x42, 0x9d, 0x03, + 0xf3, 0x6e, 0xb7, 0x73, 0x80, 0x65, 0x0d, 0xa5, 0x23, 0x03, 0x2b, 0x82, 0x8a, 0x14, 0xd7, 0x78, + 0xf3, 0xd1, 0xd3, 0xc2, 0xcc, 0xe3, 0xa7, 0x85, 0x99, 0x27, 0x4f, 0x0b, 0x33, 0x9f, 0xf7, 0x0b, + 0xda, 0xa3, 0x7e, 0x41, 0x7b, 0xdc, 0x2f, 0x68, 0x4f, 0xfa, 0x05, 0xed, 0xcf, 0x7e, 0x41, 0xfb, + 0xea, 0xaf, 0xc2, 0xcc, 0xfb, 0xf9, 0x71, 0xbf, 0x96, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd4, + 0x46, 0x40, 0xf2, 0x61, 0x15, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -1334,6 +1397,128 @@ func (m *IngressList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1605,16 +1790,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1963,43 +2138,6 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2210,43 +2348,93 @@ func (m *IngressList) Size() (n int) { return n } -func (m *IngressRule) Size() (n int) { +func (m *IngressLoadBalancerIngress) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Host) + l = len(m.IP) n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() + l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IngressRuleValue) Size() (n int) { +func (m *IngressLoadBalancerStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *IngressServiceBackend) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) - return n + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressServiceBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } func (m *IngressSpec) Size() (n int) { @@ -2316,8 +2504,6 @@ func (m *NetworkPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2450,21 +2636,6 @@ func (m *NetworkPolicySpec) Size() (n int) { return n } -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *ServiceBackendPort) Size() (n int) { if m == nil { return 0 @@ -2612,6 +2783,50 @@ func (this *IngressList) String() string { }, "") return s } +func (this 
*IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *IngressRule) String() string { if this == nil { return "nil" @@ -2672,7 +2887,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2695,7 +2910,6 @@ func (this *NetworkPolicy) String() string { s := strings.Join([]string{`&NetworkPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2805,21 +3019,6 @@ func (this *NetworkPolicySpec) String() string { }, "") return s } -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} func (this *ServiceBackendPort) String() string { if this == nil { return "nil" @@ -3778,14 +3977,397 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3813,11 +4395,11 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3845,14 +4427,13 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3862,24 +4443,25 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3902,7 +4484,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3925,47 +4507,15 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if 
fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3992,10 +4542,8 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4020,7 +4568,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4043,17 +4591,36 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4063,30 +4630,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4096,25 +4662,24 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -4932,39 +5497,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5819,90 +6351,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index ec8066396..b50dd491e 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,22 +67,22 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. 
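For reference, the IPBlock message that follows pairs a cidr with optional except entries that must fall inside that range. A minimal sketch of constructing such a peer with the vendored Go types, assuming the vendored k8s.io/api module is importable as networkingv1; the addresses are illustrative only:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	// Allow traffic from 192.168.1.0/24 while carving out one host;
	// "except" entries outside the cidr range are rejected by validation.
	peer := networkingv1.NetworkPolicyPeer{
		IPBlock: &networkingv1.IPBlock{
			CIDR:   "192.168.1.0/24",
			Except: []string{"192.168.1.8/32"},
		},
	}
	fmt.Println(peer.IPBlock.CIDR, peer.IPBlock.Except)
}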
message IPBlock { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" - // Except values will be rejected if they are outside the CIDR range + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range // +optional repeated string except = 2; } @@ -97,12 +97,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,12 +110,12 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -134,7 +134,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -146,31 +146,31 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. 
+ // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +179,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -201,15 +201,62 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // ip is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// IngressLoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // ingress is a list containing ingress points for the load-balancer. + // +optional + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // port is the port number of the ingress port. + optional int32 port = 1; + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. 
+ // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -222,14 +269,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -254,48 +301,48 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. 
+ // ingressClassName is the name of an IngressClass cluster resource. Ingress + // controller implementations use this field to know whether they should be + // serving this Ingress resource, by a transitive connection + // (controller -> IngressClass -> Ingress resource). Although the + // `kubernetes.io/ingress.class` annotation (simple constant name) was never + // formally defined, it was widely supported by Ingress controllers to create + // a direct binding between Ingress controller and Ingress resources. Newly + // created Ingress resources should prefer using the field. However, even + // though the annotation is officially deprecated, for backwards compatibility + // reasons, ingress controllers should still honor that annotation if present. // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -303,14 +350,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -318,11 +365,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -334,21 +381,16 @@ message NetworkPolicy { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional NetworkPolicyStatus status = 3; } // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -356,7 +398,7 @@ message NetworkPolicyEgressRule { // +optional repeated NetworkPolicyPort ports = 1; - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -368,15 +410,15 @@ message NetworkPolicyEgressRule { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional repeated NetworkPolicyPort ports = 1; - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -392,32 +434,32 @@ message NetworkPolicyList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. 
repeated NetworkPolicy items = 2; } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional optional IPBlock ipBlock = 3; @@ -425,40 +467,38 @@ message NetworkPolicyPeer { // NetworkPolicyPort describes a port to allow traffic on message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional optional string protocol = 1; - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. 
- // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional optional int32 endPort = 3; } // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -467,8 +507,8 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyIngressRule ingress = 2; - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -477,40 +517,28 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyEgressRule egress = 3; - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. 
// Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. -message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - // ServiceBackendPort is the service port being referenced. message ServiceBackendPort { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional optional string name = 1; - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional optional int32 number = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index bb6da4098..a17e2cb5b 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -28,19 +28,20 @@ import ( // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PolicyType string describes the NetworkPolicy type @@ -57,16 +58,16 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. 
+ // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -75,8 +76,8 @@ type NetworkPolicySpec struct { // +optional Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -85,15 +86,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` @@ -102,15 +103,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. 
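Because policyTypes defaults from whichever rule sections are present, an egress-only policy has to set it explicitly, and after this bump NetworkPolicy no longer carries a Status field. A minimal sketch using the vendored types, assuming PolicyTypeEgress is the package constant for "Egress" (not shown in this hunk); the object name and namespace are made up:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Egress-only policy: policyTypes is set explicitly to ["Egress"],
	// and with no egress rules listed all outgoing traffic is denied.
	np := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress", Namespace: "demo"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // empty selector: all pods in the namespace
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
	fmt.Println(np.Name, np.Spec.PolicyTypes)
}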
type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -123,7 +124,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -131,7 +132,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -142,38 +143,37 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. 
This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" - // Except values will be rejected if they are outside the CIDR range + + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -181,83 +181,42 @@ type IPBlock struct { // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. 
// +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } -// NetworkPolicyConditionType is the type for status conditions on -// a NetworkPolicy. This type should be used with the -// NetworkPolicyStatus.Conditions field. -type NetworkPolicyConditionType string - -const ( - // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by - // the Network Policy provider and will be implemented in the cluster - NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted" - - // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially - // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some - // other condition - NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure" - - // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the - // Network Policy provider and will not be implemented in the cluster - NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure" -) - -// NetworkPolicyConditionReason defines the set of reasons that explain why a -// particular NetworkPolicy condition type has been raised. -type NetworkPolicyConditionReason string - -const ( - // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been - // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider - NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" -) - -// NetworkPolicyStatus describe the current state of the NetworkPolicy. -type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -270,17 +229,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. 
+ // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -291,83 +251,132 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. + // ingressClassName is the name of an IngressClass cluster resource. Ingress + // controller implementations use this field to know whether they should be + // serving this Ingress resource, by a transitive connection + // (controller -> IngressClass -> Ingress resource). Although the + // `kubernetes.io/ingress.class` annotation (simple constant name) was never + // formally defined, it was widely supported by Ingress controllers to create + // a direct binding between Ingress controller and Ingress resources. Newly + // created Ingress resources should prefer using the field. However, even + // though the annotation is officially deprecated, for backwards compatibility + // reasons, ingress controllers should still honor that annotation if present. // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. 
Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressLoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // ingress is a list containing ingress points for the load-balancer. + // +optional + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // ip is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // ports provides information about the ports exposed by this LoadBalancer. 
+ // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -380,14 +389,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -415,7 +424,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. 
// +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -454,14 +463,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -477,19 +486,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -499,23 +508,23 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -532,12 +541,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. 
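With this bump, Ingress status uses the networking/v1-local IngressLoadBalancerStatus instead of core/v1 LoadBalancerStatus, so callers walk Status.LoadBalancer.Ingress for IPs and hostnames. A minimal sketch, assuming the vendored module is importable; the address is made up:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

// ingressAddresses collects the IPs and hostnames reported by the
// load-balancer via the networking/v1 status types shown above.
func ingressAddresses(ing *networkingv1.Ingress) []string {
	var out []string
	for _, lb := range ing.Status.LoadBalancer.Ingress {
		if lb.IP != "" {
			out = append(out, lb.IP)
		}
		if lb.Hostname != "" {
			out = append(out, lb.Hostname)
		}
	}
	return out
}

func main() {
	ing := &networkingv1.Ingress{
		Status: networkingv1.IngressStatus{
			LoadBalancer: networkingv1.IngressLoadBalancerStatus{
				Ingress: []networkingv1.IngressLoadBalancerIngress{{IP: "203.0.113.10"}},
			},
		},
	}
	fmt.Println(ingressAddresses(ing)) // [203.0.113.10]
}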
type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -545,15 +555,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -564,7 +574,7 @@ const ( // IngressClassParametersReferenceScopeNamespace indicates that the // referenced Parameters resource is namespace-scoped. IngressClassParametersReferenceScopeNamespace = "Namespace" - // IngressClassParametersReferenceScopeNamespace indicates that the + // IngressClassParametersReferenceScopeCluster indicates that the // referenced Parameters resource is cluster-scoped. IngressClassParametersReferenceScopeCluster = "Cluster" ) @@ -572,20 +582,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. 
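A hedged sketch of an IngressClass following the controller and parameters documentation above; "acme.io/ingress-controller" is the example name taken from the field comment, while the kind, object names and namespace are hypothetical.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClass references a namespace-scoped parameters object, so Namespace must be set.
func exampleClass() networkingv1.IngressClass {
	scope := networkingv1.IngressClassParametersReferenceScopeNamespace
	apiGroup := "acme.io"
	ns := "ingress-system"
	return networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "acme-lb"},
		Spec: networkingv1.IngressClassSpec{
			Controller: "acme.io/ingress-controller",
			Parameters: &networkingv1.IngressClassParametersReference{
				APIGroup:  &apiGroup,
				Kind:      "IngressParameters",
				Name:      "default-params",
				Scope:     &scope,
				Namespace: &ns, // required because Scope is "Namespace"
			},
		},
	}
}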
This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -597,10 +611,11 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 8e7870dae..ff080540d 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. 
Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -48,9 +48,9 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. 
This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,16 +124,47 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "IngressLoadBalancerStatus represents the status of a load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -150,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -160,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. 
Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -172,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -180,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. 
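A small, assumed example of an IngressSpec combining the fields documented in this hunk (ingressClassName, tls, defaultBackend); the class, host, secret and service names are placeholders, not values from the vendored code.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
)

// exampleSpec prefers the ingressClassName field over the legacy
// kubernetes.io/ingress.class annotation and terminates TLS on port 443.
func exampleSpec() networkingv1.IngressSpec {
	className := "acme-lb"
	return networkingv1.IngressSpec{
		IngressClassName: &className,
		TLS: []networkingv1.IngressTLS{{
			Hosts:      []string{"foo.bar.com"}, // must match names used in the TLS secret
			SecretName: "foo-bar-tls",
		}},
		DefaultBackend: &networkingv1.IngressBackend{
			Service: &networkingv1.IngressServiceBackend{
				Name: "default-svc",
				Port: networkingv1.ServiceBackendPort{Name: "http"},
			},
		},
	}
}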
Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -192,8 +223,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -202,8 +232,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -212,8 +242,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -223,7 +253,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -232,9 +262,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. 
This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -243,9 +273,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -254,29 +284,20 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. 
Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. 
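To make the NetworkPolicySpec semantics concrete, a hypothetical spec that isolates the selected pods for ingress only and admits one CIDR on a single TCP port; the labels, CIDRs and port are invented, and metav1/intstr come from the usual apimachinery packages.

package example

import (
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// exampleNetpolSpec allows traffic from one ipBlock on TCP 8080 to pods labeled app=api.
func exampleNetpolSpec() networkingv1.NetworkPolicySpec {
	tcp := corev1.ProtocolTCP
	port := intstr.FromInt(8080)
	return networkingv1.NetworkPolicySpec{
		PodSelector: metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "api"},
		},
		Ingress: []networkingv1.NetworkPolicyIngressRule{{
			Ports: []networkingv1.NetworkPolicyPort{{Protocol: &tcp, Port: &port}},
			From: []networkingv1.NetworkPolicyPeer{{
				IPBlock: &networkingv1.IPBlock{
					CIDR:   "192.168.1.0/24",
					Except: []string{"192.168.1.128/25"}, // must fall inside CIDR
				},
			}},
		}},
		// With only "Ingress" listed, egress from the selected pods is not
		// restricted by this policy.
		PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
	}
}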
If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } -var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", -} - -func (NetworkPolicyStatus) SwaggerDoc() map[string]string { - return map_NetworkPolicyStatus -} - var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 349c40ca7..540873833 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -293,6 +293,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. 
+func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in @@ -432,7 +499,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } @@ -645,29 +711,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus. -func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { - if in == nil { - return nil - } - out := new(NetworkPolicyStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/networking/v1alpha1/doc.go new file mode 100644 index 000000000..3827b0418 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=networking.k8s.io + +package v1alpha1 // import "k8s.io/api/networking/v1alpha1" diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go new file mode 100644 index 000000000..f54d1f824 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -0,0 +1,1858 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } +func (*ClusterCIDR) ProtoMessage() {} +func (*ClusterCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{0} +} +func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDR.Merge(m, src) +} +func (m *ClusterCIDR) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo + +func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } +func (*ClusterCIDRList) ProtoMessage() {} +func (*ClusterCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{1} +} +func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRList.Merge(m, src) +} +func (m *ClusterCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo + +func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } +func (*ClusterCIDRSpec) ProtoMessage() {} +func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{2} +} +func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err 
+ } + return b[:n], nil +} +func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) +} +func (m *ClusterCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo + +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{3} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{4} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{5} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{6} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + 
xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") + proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") + proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1b7ac8d7d97acec) +} + +var fileDescriptor_c1b7ac8d7d97acec = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, + 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, + 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, + 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, + 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, + 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, + 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, + 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, + 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, + 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, + 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, + 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, + 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, + 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, + 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, + 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, + 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, + 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, + 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, + 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, + 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, + 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, + 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, + 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, + 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, + 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 
0x32, 0xa5, 0xa3, 0xed, + 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, + 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, + 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, + 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, + 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, + 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, + 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, + 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, + 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, + 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, + 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, + 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, + 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, + 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, + 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, + 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, + 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, +} + +func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IPv6) + copy(dAtA[i:], m.IPv6) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) + i-- + dAtA[i] = 0x22 + i -= len(m.IPv4) + copy(dAtA[i:], m.IPv4) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) + i-- + dAtA[i] = 0x10 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := 
m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterCIDR) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IPv6) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + } + m.PerNodeHostBits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PerNodeHostBits |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv4 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv6 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto 
b/vendor/k8s.io/api/networking/v1alpha1/generated.proto new file mode 100644 index 000000000..0f1f30d70 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -0,0 +1,155 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.networking.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/networking/v1alpha1"; + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +message ClusterCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ClusterCIDRSpec spec = 2; +} + +// ClusterCIDRList contains a list of ClusterCIDR. +message ClusterCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ClusterCIDRs. + repeated ClusterCIDR items = 2; +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +message ClusterCIDRSpec { + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. + // This field is immutable. + // +optional + optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; + + // perNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). 
+ // This field is immutable. + // +required + optional int32 perNodeHostBits = 2; + + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + optional string ipv4 = 3; + + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + optional string ipv6 = 4; +} + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; + + // UID is the uid of the object being referenced. + // +optional + optional string uid = 5; +} + diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go new file mode 100644 index 000000000..8dda6394d --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package. +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register objects in this package. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme. + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterCIDR{}, + &ClusterCIDRList{}, + &IPAddress{}, + &IPAddressList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go new file mode 100644 index 000000000..52e4a11e8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -0,0 +1,163 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. 
A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +type ClusterCIDR struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +type ClusterCIDRSpec struct { + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. + // This field is immutable. + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // perNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. + // +required + PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` + + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` + + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDRList contains a list of ClusterCIDR. +type ClusterCIDRList struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of ClusterCIDRs. + Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
+// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` + // UID is the uid of the object being referenced. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..85304784f --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,104 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterCIDR = map[string]string{ + "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ClusterCIDR) SwaggerDoc() map[string]string { + return map_ClusterCIDR +} + +var map_ClusterCIDRList = map[string]string{ + "": "ClusterCIDRList contains a list of ClusterCIDR.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ClusterCIDRs.", +} + +func (ClusterCIDRList) SwaggerDoc() map[string]string { + return map_ClusterCIDRList +} + +var map_ClusterCIDRSpec = map[string]string{ + "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. 
This field is immutable.", +} + +func (ClusterCIDRSpec) SwaggerDoc() map[string]string { + return map_ClusterCIDRSpec +} + +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", + "uid": "UID is the uid of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go new file mode 100644 index 000000000..5f9c23f70 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. 
+ // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..97db2eacc --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,205 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. +func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { + if in == nil { + return nil + } + out := new(ClusterCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. +func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { + if in == nil { + return nil + } + out := new(ClusterCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. +func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { + if in == nil { + return nil + } + out := new(ClusterCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. 
+func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..60438ba59 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,94 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 25 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 28 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 31 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 25 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 28 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 31 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 8eb092b4d..6f298cd78 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" math "math" @@ -296,10 +297,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{10} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{11} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} +func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) 
ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{9} + return fileDescriptor_5bea11de0ceb8f53, []int{12} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +412,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{10} + return fileDescriptor_5bea11de0ceb8f53, []int{13} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +440,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{11} + return fileDescriptor_5bea11de0ceb8f53, []int{14} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +468,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{12} + return fileDescriptor_5bea11de0ceb8f53, []int{15} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +496,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{13} + return fileDescriptor_5bea11de0ceb8f53, []int{16} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -446,6 +531,9 @@ func init() { proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1beta1.IngressClassParametersReference") proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec") proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1beta1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") @@ -458,75 +546,85 @@ func init() { } var fileDescriptor_5bea11de0ceb8f53 = []byte{ - // 1087 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xda, 0x71, 0xeb, 0x8c, 0x9d, 0x34, 0x1a, 0x72, 0xb0, 0x22, 0xd5, 0x8e, 0xf6, 0x80, - 0x02, 0xa5, 0xbb, 0xc4, 0x2d, 0x88, 0x13, 0x82, 0x8d, 0x04, 0x89, 0x6a, 0x1a, 0x33, 0xb6, 0x00, - 0x21, 0x0e, 0x1d, 0xaf, 0x5f, 0xd7, 0x8b, 0xd7, 0xbb, 0xcb, 0xcc, 0xac, 0x51, 0x6f, 0x5c, 0x39, - 0xc1, 0xaf, 0xe0, 0x27, 0x20, 0x8e, 0x08, 0x2e, 0x39, 0xf6, 0xd8, 0x0b, 0x11, 0x31, 0xff, 0x82, - 0x13, 0x9a, 0xd9, 0xf1, 0xee, 0xda, 0x4e, 0x5a, 0x87, 0x43, 0x4f, 0xf6, 0xbc, 0xf7, 0xbd, 0xf7, - 
0xe6, 0xbd, 0xf7, 0xed, 0x7b, 0x83, 0x3e, 0x19, 0x7f, 0xc0, 0x2d, 0x3f, 0xb2, 0xc7, 0xc9, 0x00, - 0x58, 0x08, 0x02, 0xb8, 0x3d, 0x85, 0x70, 0x18, 0x31, 0x5b, 0x2b, 0x68, 0xec, 0xdb, 0x21, 0x88, - 0xef, 0x23, 0x36, 0xf6, 0x43, 0xcf, 0x9e, 0x1e, 0x0d, 0x40, 0xd0, 0x23, 0xdb, 0x83, 0x10, 0x18, - 0x15, 0x30, 0xb4, 0x62, 0x16, 0x89, 0x08, 0xdf, 0x4d, 0xe1, 0x16, 0x8d, 0x7d, 0x2b, 0x87, 0x5b, - 0x1a, 0xbe, 0x7f, 0xdf, 0xf3, 0xc5, 0x28, 0x19, 0x58, 0x6e, 0x34, 0xb1, 0xbd, 0xc8, 0x8b, 0x6c, - 0x65, 0x35, 0x48, 0x9e, 0xaa, 0x93, 0x3a, 0xa8, 0x7f, 0xa9, 0xb7, 0x7d, 0xb3, 0x10, 0xdc, 0x8d, - 0x18, 0xd8, 0xd3, 0x95, 0x88, 0xfb, 0x0f, 0x73, 0xcc, 0x84, 0xba, 0x23, 0x3f, 0x04, 0xf6, 0xcc, - 0x8e, 0xc7, 0x9e, 0x14, 0x70, 0x7b, 0x02, 0x82, 0x5e, 0x65, 0x65, 0x5f, 0x67, 0xc5, 0x92, 0x50, - 0xf8, 0x13, 0x58, 0x31, 0x78, 0xff, 0x55, 0x06, 0xdc, 0x1d, 0xc1, 0x84, 0xae, 0xd8, 0x3d, 0xb8, - 0xce, 0x2e, 0x11, 0x7e, 0x60, 0xfb, 0xa1, 0xe0, 0x82, 0x2d, 0x1b, 0x99, 0x7f, 0x1a, 0xe8, 0xce, - 0x49, 0xbf, 0xdf, 0x3d, 0x0d, 0x3d, 0x06, 0x9c, 0x77, 0xa9, 0x18, 0xe1, 0x03, 0xb4, 0x19, 0x53, - 0x31, 0x6a, 0x18, 0x07, 0xc6, 0xe1, 0x96, 0x53, 0x3f, 0xbf, 0x68, 0x6d, 0xcc, 0x2e, 0x5a, 0x9b, - 0x52, 0x47, 0x94, 0x06, 0x3f, 0x44, 0x55, 0xf9, 0xdb, 0x7f, 0x16, 0x43, 0xa3, 0xac, 0x50, 0x8d, - 0xd9, 0x45, 0xab, 0xda, 0xd5, 0xb2, 0x7f, 0x0b, 0xff, 0x49, 0x86, 0xc4, 0x5f, 0xa1, 0xdb, 0x03, - 0xea, 0x8e, 0x21, 0x1c, 0x36, 0x4a, 0x07, 0xc6, 0x61, 0xad, 0x7d, 0xdf, 0x7a, 0x69, 0x0f, 0x2d, - 0x7d, 0x29, 0x27, 0x35, 0x72, 0xee, 0xe8, 0x9b, 0xdc, 0xd6, 0x02, 0x32, 0x77, 0x67, 0x8e, 0xd1, - 0x5e, 0x21, 0x09, 0x92, 0x04, 0xf0, 0x05, 0x0d, 0x12, 0xc0, 0x3d, 0x54, 0x91, 0xd1, 0x79, 0xc3, - 0x38, 0x28, 0x1f, 0xd6, 0xda, 0xd6, 0x2b, 0xe2, 0x2d, 0x15, 0xc2, 0xd9, 0xd6, 0x01, 0x2b, 0xf2, - 0xc4, 0x49, 0xea, 0xcb, 0xfc, 0xa9, 0x84, 0x6e, 0x6b, 0x14, 0x7e, 0x82, 0xaa, 0xb2, 0xef, 0x43, - 0x2a, 0xa8, 0x2a, 0x57, 0xad, 0xfd, 0x6e, 0x21, 0x46, 0xd6, 0x06, 0x2b, 0x1e, 0x7b, 0x52, 0xc0, - 0x2d, 0x89, 0xb6, 0xa6, 0x47, 0xd6, 0xd9, 0xe0, 0x5b, 0x70, 0xc5, 0x67, 0x20, 0xa8, 0x83, 0x75, - 0x14, 0x94, 0xcb, 0x48, 0xe6, 0x15, 0x77, 0xd0, 0x26, 0x8f, 0xc1, 0xd5, 0x15, 0x7b, 0x7b, 0xbd, - 0x8a, 0xf5, 0x62, 0x70, 0xf3, 0xc6, 0xc9, 0x13, 0x51, 0x5e, 0x70, 0x1f, 0xdd, 0xe2, 0x82, 0x8a, - 0x84, 0xab, 0xb6, 0xd5, 0xda, 0xef, 0xac, 0xe9, 0x4f, 0xd9, 0x38, 0x3b, 0xda, 0xe3, 0xad, 0xf4, - 0x4c, 0xb4, 0x2f, 0xf3, 0xc7, 0x12, 0xda, 0x59, 0xec, 0x15, 0x7e, 0x0f, 0xd5, 0x38, 0xb0, 0xa9, - 0xef, 0xc2, 0x63, 0x3a, 0x01, 0x4d, 0xa5, 0x37, 0xb4, 0x7d, 0xad, 0x97, 0xab, 0x48, 0x11, 0x87, - 0xbd, 0xcc, 0xac, 0x1b, 0x31, 0xa1, 0x93, 0xbe, 0xbe, 0xa4, 0x92, 0xd9, 0x56, 0xca, 0x6c, 0xeb, - 0x34, 0x14, 0x67, 0xac, 0x27, 0x98, 0x1f, 0x7a, 0x2b, 0x81, 0xa4, 0x33, 0x52, 0xf4, 0x8c, 0xbf, - 0x44, 0x55, 0x06, 0x3c, 0x4a, 0x98, 0x0b, 0xba, 0x14, 0x0b, 0x64, 0x94, 0x23, 0x40, 0xb6, 0x49, - 0xf2, 0x76, 0xd8, 0x89, 0x5c, 0x1a, 0xa4, 0xcd, 0x21, 0xf0, 0x14, 0x18, 0x84, 0x2e, 0x38, 0x75, - 0x49, 0x78, 0xa2, 0x5d, 0x90, 0xcc, 0x99, 0xfc, 0xa0, 0xea, 0xba, 0x16, 0xc7, 0x01, 0x7d, 0x2d, - 0x14, 0xf9, 0x7c, 0x81, 0x22, 0xf6, 0x7a, 0x2d, 0x55, 0x97, 0xbb, 0x8e, 0x27, 0xe6, 0x1f, 0x06, - 0xda, 0x2d, 0x02, 0x3b, 0x3e, 0x17, 0xf8, 0x9b, 0x95, 0x4c, 0xac, 0xf5, 0x32, 0x91, 0xd6, 0x2a, - 0x8f, 0x5d, 0x1d, 0xaa, 0x3a, 0x97, 0x14, 0xb2, 0xe8, 0xa2, 0x8a, 0x2f, 0x60, 0xc2, 0x1b, 0x25, - 0xf5, 0xad, 0xde, 0xbb, 0x41, 0x1a, 0xf9, 0x87, 0x7a, 0x2a, 0x3d, 0x90, 0xd4, 0x91, 0xf9, 0x97, - 0x81, 0x5a, 0x45, 0x58, 0x97, 0x32, 0x3a, 0x01, 0x01, 0x8c, 0x67, 0x6d, 0xc4, 0x87, 0xa8, 0x4a, - 0xbb, 0xa7, 0x9f, 0xb2, 
0x28, 0x89, 0xe7, 0xf3, 0x4e, 0xde, 0xef, 0x63, 0x2d, 0x23, 0x99, 0x56, - 0x4e, 0xc5, 0xb1, 0xaf, 0x47, 0x57, 0x61, 0x2a, 0x3e, 0xf2, 0xc3, 0x21, 0x51, 0x1a, 0x89, 0x08, - 0x25, 0xd9, 0xcb, 0x8b, 0x08, 0xc5, 0x72, 0xa5, 0xc1, 0x2d, 0x54, 0xe1, 0x6e, 0x14, 0x43, 0x63, - 0x53, 0x41, 0xb6, 0xe4, 0x95, 0x7b, 0x52, 0x40, 0x52, 0x39, 0xbe, 0x87, 0xb6, 0x24, 0x90, 0xc7, - 0xd4, 0x85, 0x46, 0x45, 0x81, 0xb6, 0x67, 0x17, 0xad, 0xad, 0xc7, 0x73, 0x21, 0xc9, 0xf5, 0xe6, - 0xaf, 0x4b, 0x4d, 0x92, 0xfd, 0xc3, 0x6d, 0x84, 0xdc, 0x28, 0x14, 0x2c, 0x0a, 0x02, 0x60, 0x3a, - 0xa5, 0x8c, 0x3e, 0xc7, 0x99, 0x86, 0x14, 0x50, 0x38, 0x44, 0x28, 0xce, 0x6a, 0xa3, 0x69, 0xf4, - 0xe1, 0x0d, 0xea, 0x7f, 0x45, 0x61, 0x9d, 0x1d, 0x19, 0xaf, 0xa0, 0x28, 0x44, 0x30, 0x7f, 0x33, - 0x50, 0x4d, 0xdb, 0xbf, 0x06, 0x62, 0x3d, 0x5a, 0x24, 0xd6, 0x9b, 0x6b, 0x2e, 0x9d, 0xab, 0x39, - 0xf5, 0x4b, 0x7e, 0x75, 0xb9, 0x66, 0x64, 0xcf, 0x47, 0x11, 0x17, 0xcb, 0xbb, 0xf2, 0x24, 0xe2, - 0x82, 0x28, 0x0d, 0x4e, 0xd0, 0xae, 0xbf, 0xb4, 0x97, 0x6e, 0xf6, 0xa5, 0x66, 0x66, 0x4e, 0x43, - 0xbb, 0xdf, 0x5d, 0xd6, 0x90, 0x95, 0x10, 0x26, 0xa0, 0x15, 0x94, 0x1c, 0x14, 0x23, 0x21, 0x62, - 0x5d, 0xe3, 0x07, 0xeb, 0x6f, 0xc3, 0xfc, 0x0a, 0x55, 0x95, 0x5d, 0xbf, 0xdf, 0x25, 0xca, 0x95, - 0xf9, 0x7b, 0x29, 0xab, 0x87, 0xa2, 0xdf, 0x47, 0x59, 0xb6, 0x8a, 0x19, 0x6a, 0xf8, 0xa7, 0x64, - 0xdf, 0x2b, 0x5c, 0x3c, 0xd3, 0x91, 0x15, 0x34, 0xee, 0xe7, 0xaf, 0x04, 0xe3, 0xff, 0xbc, 0x12, - 0x6a, 0x57, 0xbd, 0x10, 0xf0, 0x09, 0x2a, 0x8b, 0x60, 0x4e, 0x81, 0xb7, 0xd6, 0xf3, 0xd8, 0xef, - 0xf4, 0x9c, 0x9a, 0x2e, 0x79, 0xb9, 0xdf, 0xe9, 0x11, 0xe9, 0x02, 0x9f, 0xa1, 0x0a, 0x4b, 0x02, - 0x90, 0x1b, 0xb4, 0xbc, 0xfe, 0x46, 0x96, 0x15, 0xcc, 0x29, 0x25, 0x4f, 0x9c, 0xa4, 0x7e, 0xcc, - 0xef, 0xd0, 0xf6, 0xc2, 0x9a, 0xc5, 0x4f, 0x50, 0x3d, 0x88, 0xe8, 0xd0, 0xa1, 0x01, 0x0d, 0x5d, - 0xfd, 0x11, 0x2f, 0xf1, 0x76, 0xbe, 0x9f, 0x3a, 0x05, 0x9c, 0x5e, 0xd2, 0x7b, 0x3a, 0x48, 0xbd, - 0xa8, 0x23, 0x0b, 0x1e, 0x4d, 0x8a, 0x50, 0x9e, 0xa3, 0x9c, 0x4a, 0x92, 0xa9, 0xe9, 0x2b, 0x49, - 0x4f, 0x25, 0x49, 0x60, 0x4e, 0x52, 0xb9, 0x9c, 0x29, 0x1c, 0x5c, 0x06, 0x42, 0xb5, 0xb3, 0xb4, - 0x38, 0x53, 0x7a, 0x99, 0x86, 0x14, 0x50, 0xce, 0xf1, 0xf9, 0x65, 0x73, 0xe3, 0xf9, 0x65, 0x73, - 0xe3, 0xc5, 0x65, 0x73, 0xe3, 0x87, 0x59, 0xd3, 0x38, 0x9f, 0x35, 0x8d, 0xe7, 0xb3, 0xa6, 0xf1, - 0x62, 0xd6, 0x34, 0xfe, 0x9e, 0x35, 0x8d, 0x9f, 0xff, 0x69, 0x6e, 0x7c, 0x7d, 0xf7, 0xa5, 0x6f, - 0xfe, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xb0, 0xe2, 0xe8, 0x2c, 0x0c, 0x00, 0x00, + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xcf, 0xda, 0x71, 0xe3, 0x8c, 0xd3, 0x36, 0x0c, 0x3d, 0x98, 0xa0, 0xda, 0xd1, 0x1e, 0x50, + 0xa0, 0xed, 0x6e, 0x93, 0x16, 0x54, 0x2e, 0x08, 0x36, 0x02, 0x12, 0x25, 0x24, 0x66, 0x6c, 0x1e, + 0x42, 0x1c, 0x18, 0xaf, 0xa7, 0xf6, 0xe2, 0xf5, 0xee, 0x6a, 0x66, 0x36, 0xa8, 0x37, 0x10, 0x27, + 0x4e, 0xf0, 0x3f, 0x20, 0xf1, 0x27, 0x20, 0x2e, 0x48, 0x08, 0x2e, 0x39, 0xf6, 0xd8, 0x0b, 0x11, + 0x31, 0xff, 0x45, 0x4f, 0xe8, 0x9b, 0x9d, 0x7d, 0xf8, 0x91, 0xd6, 0xe1, 0xd0, 0x53, 0xbc, 0xdf, + 0xe3, 0xf7, 0xbd, 0xbf, 0xf9, 0x82, 0x3e, 0x18, 0x3e, 0x10, 0x96, 0x17, 0xda, 0xc3, 0xb8, 0xcb, + 0x78, 0xc0, 0x24, 0x13, 0xf6, 0x09, 0x0b, 0x7a, 0x21, 0xb7, 0x35, 0x83, 0x46, 0x9e, 0x1d, 0x30, + 0xf9, 0x4d, 0xc8, 0x87, 0x5e, 0xd0, 0xb7, 0x4f, 0xb6, 0xbb, 0x4c, 0xd2, 0x6d, 0xbb, 0xcf, 0x02, + 0xc6, 0xa9, 0x64, 0x3d, 0x2b, 0xe2, 0xa1, 0x0c, 0xf1, 0xcd, 0x44, 0xdc, 0xa2, 0x91, 0x67, 0xe5, + 0xe2, 
0x96, 0x16, 0xdf, 0xb8, 0xd3, 0xf7, 0xe4, 0x20, 0xee, 0x5a, 0x6e, 0x38, 0xb2, 0xfb, 0x61, + 0x3f, 0xb4, 0x95, 0x56, 0x37, 0x7e, 0xa8, 0xbe, 0xd4, 0x87, 0xfa, 0x95, 0xa0, 0x6d, 0x98, 0x05, + 0xe3, 0x6e, 0xc8, 0x99, 0x7d, 0x32, 0x63, 0x71, 0xe3, 0x7e, 0x2e, 0x33, 0xa2, 0xee, 0xc0, 0x0b, + 0x18, 0x7f, 0x64, 0x47, 0xc3, 0x3e, 0x10, 0x84, 0x3d, 0x62, 0x92, 0xce, 0xd3, 0xb2, 0x2f, 0xd2, + 0xe2, 0x71, 0x20, 0xbd, 0x11, 0x9b, 0x51, 0x78, 0xeb, 0x79, 0x0a, 0xc2, 0x1d, 0xb0, 0x11, 0x9d, + 0xd1, 0xbb, 0x77, 0x91, 0x5e, 0x2c, 0x3d, 0xdf, 0xf6, 0x02, 0x29, 0x24, 0x9f, 0x56, 0x32, 0xff, + 0x32, 0xd0, 0xf5, 0xbd, 0x4e, 0xa7, 0xb5, 0x1f, 0xf4, 0x39, 0x13, 0xa2, 0x45, 0xe5, 0x00, 0x6f, + 0xa2, 0xe5, 0x88, 0xca, 0x41, 0xdd, 0xd8, 0x34, 0xb6, 0x56, 0x9d, 0xb5, 0xd3, 0xb3, 0xe6, 0xd2, + 0xf8, 0xac, 0xb9, 0x0c, 0x3c, 0xa2, 0x38, 0xf8, 0x3e, 0xaa, 0xc2, 0xdf, 0xce, 0xa3, 0x88, 0xd5, + 0xcb, 0x4a, 0xaa, 0x3e, 0x3e, 0x6b, 0x56, 0x5b, 0x9a, 0xf6, 0xb4, 0xf0, 0x9b, 0x64, 0x92, 0xf8, + 0x73, 0xb4, 0xd2, 0xa5, 0xee, 0x90, 0x05, 0xbd, 0x7a, 0x69, 0xd3, 0xd8, 0xaa, 0xed, 0xdc, 0xb1, + 0x9e, 0x59, 0x43, 0x4b, 0x3b, 0xe5, 0x24, 0x4a, 0xce, 0x75, 0xed, 0xc9, 0x8a, 0x26, 0x90, 0x14, + 0xce, 0x1c, 0xa2, 0x1b, 0x85, 0x20, 0x48, 0xec, 0xb3, 0x4f, 0xa9, 0x1f, 0x33, 0xdc, 0x46, 0x15, + 0xb0, 0x2e, 0xea, 0xc6, 0x66, 0x79, 0xab, 0xb6, 0x63, 0x3d, 0xc7, 0xde, 0x54, 0x22, 0x9c, 0xab, + 0xda, 0x60, 0x05, 0xbe, 0x04, 0x49, 0xb0, 0xcc, 0x1f, 0x4b, 0x68, 0x45, 0x4b, 0xe1, 0xaf, 0x50, + 0x15, 0xea, 0xde, 0xa3, 0x92, 0xaa, 0x74, 0xd5, 0x76, 0xee, 0x16, 0x6c, 0x64, 0x65, 0xb0, 0xa2, + 0x61, 0x1f, 0x08, 0xc2, 0x02, 0x69, 0xeb, 0x64, 0xdb, 0x3a, 0xee, 0x7e, 0xcd, 0x5c, 0xf9, 0x11, + 0x93, 0xd4, 0xc1, 0xda, 0x0a, 0xca, 0x69, 0x24, 0x43, 0xc5, 0x87, 0x68, 0x59, 0x44, 0xcc, 0xd5, + 0x19, 0x7b, 0x63, 0xb1, 0x8c, 0xb5, 0x23, 0xe6, 0xe6, 0x85, 0x83, 0x2f, 0xa2, 0x50, 0x70, 0x07, + 0x5d, 0x11, 0x92, 0xca, 0x58, 0xa8, 0xb2, 0xd5, 0x76, 0x6e, 0x2f, 0x88, 0xa7, 0x74, 0x9c, 0x6b, + 0x1a, 0xf1, 0x4a, 0xf2, 0x4d, 0x34, 0x96, 0xf9, 0x43, 0x09, 0x5d, 0x9b, 0xac, 0x15, 0x7e, 0x13, + 0xd5, 0x04, 0xe3, 0x27, 0x9e, 0xcb, 0x8e, 0xe8, 0x88, 0xe9, 0x56, 0x7a, 0x59, 0xeb, 0xd7, 0xda, + 0x39, 0x8b, 0x14, 0xe5, 0x70, 0x3f, 0x53, 0x6b, 0x85, 0x5c, 0xea, 0xa0, 0x2f, 0x4e, 0x29, 0x74, + 0xb6, 0x95, 0x74, 0xb6, 0xb5, 0x1f, 0xc8, 0x63, 0xde, 0x96, 0xdc, 0x0b, 0xfa, 0x33, 0x86, 0x00, + 0x8c, 0x14, 0x91, 0xf1, 0x67, 0xa8, 0xca, 0x99, 0x08, 0x63, 0xee, 0x32, 0x9d, 0x8a, 0x89, 0x66, + 0x84, 0x15, 0x00, 0x65, 0x82, 0xbe, 0xed, 0x1d, 0x86, 0x2e, 0xf5, 0x93, 0xe2, 0x10, 0xf6, 0x90, + 0x71, 0x16, 0xb8, 0xcc, 0x59, 0x83, 0x86, 0x27, 0x1a, 0x82, 0x64, 0x60, 0x30, 0x50, 0x6b, 0x3a, + 0x17, 0xbb, 0x3e, 0x7d, 0x21, 0x2d, 0xf2, 0xf1, 0x44, 0x8b, 0xd8, 0x8b, 0x95, 0x54, 0x39, 0x77, + 0x51, 0x9f, 0x98, 0x7f, 0x1a, 0x68, 0xbd, 0x28, 0x78, 0xe8, 0x09, 0x89, 0xbf, 0x9c, 0x89, 0xc4, + 0x5a, 0x2c, 0x12, 0xd0, 0x56, 0x71, 0xac, 0x6b, 0x53, 0xd5, 0x94, 0x52, 0x88, 0xa2, 0x85, 0x2a, + 0x9e, 0x64, 0x23, 0x51, 0x2f, 0xa9, 0x59, 0xbd, 0x75, 0x89, 0x30, 0xf2, 0x41, 0xdd, 0x07, 0x04, + 0x92, 0x00, 0x99, 0x7f, 0x1b, 0xa8, 0x59, 0x14, 0x6b, 0x51, 0x4e, 0x47, 0x4c, 0x32, 0x2e, 0xb2, + 0x32, 0xe2, 0x2d, 0x54, 0xa5, 0xad, 0xfd, 0x0f, 0x79, 0x18, 0x47, 0xe9, 0xbe, 0x03, 0xff, 0xde, + 0xd3, 0x34, 0x92, 0x71, 0x61, 0x2b, 0x0e, 0x3d, 0xbd, 0xba, 0x0a, 0x5b, 0xf1, 0xc0, 0x0b, 0x7a, + 0x44, 0x71, 0x40, 0x22, 0x80, 0x66, 0x2f, 0x4f, 0x4a, 0xa8, 0x2e, 0x57, 0x1c, 0xdc, 0x44, 0x15, + 0xe1, 0x86, 0x11, 0xab, 0x2f, 0x2b, 0x91, 0x55, 0x70, 0xb9, 0x0d, 0x04, 0x92, 0xd0, 0xf1, 0x2d, + 0xb4, 0x0a, 0x82, 0x22, 0xa2, 
0x2e, 0xab, 0x57, 0x94, 0xd0, 0xd5, 0xf1, 0x59, 0x73, 0xf5, 0x28, + 0x25, 0x92, 0x9c, 0x6f, 0xfe, 0x3a, 0x55, 0x24, 0xa8, 0x1f, 0xde, 0x41, 0xc8, 0x0d, 0x03, 0xc9, + 0x43, 0xdf, 0x67, 0x5c, 0x87, 0x94, 0xb5, 0xcf, 0x6e, 0xc6, 0x21, 0x05, 0x29, 0x1c, 0x20, 0x14, + 0x65, 0xb9, 0xd1, 0x6d, 0xf4, 0xce, 0x25, 0xf2, 0x3f, 0x27, 0xb1, 0xce, 0x35, 0xb0, 0x57, 0x60, + 0x14, 0x2c, 0x98, 0xbf, 0x19, 0xa8, 0xa6, 0xf5, 0x5f, 0x40, 0x63, 0x1d, 0x4c, 0x36, 0xd6, 0x6b, + 0x0b, 0x3e, 0x3a, 0xf3, 0x7b, 0xea, 0x77, 0x03, 0x6d, 0xa4, 0xae, 0x87, 0xb4, 0xe7, 0x50, 0x9f, + 0x06, 0x2e, 0xe3, 0xe9, 0x7b, 0xb0, 0x81, 0x4a, 0x5e, 0xda, 0x48, 0x48, 0x03, 0x94, 0xf6, 0x5b, + 0xa4, 0xe4, 0x45, 0xf8, 0x36, 0xaa, 0x0e, 0x42, 0x21, 0x55, 0x8b, 0x24, 0x4d, 0x94, 0x79, 0xbd, + 0xa7, 0xe9, 0x24, 0x93, 0xc0, 0x9f, 0xa0, 0x4a, 0x14, 0x72, 0x29, 0xea, 0xcb, 0xca, 0xeb, 0xbb, + 0x8b, 0x79, 0x0d, 0xbb, 0x4d, 0x2f, 0xeb, 0xfc, 0xf1, 0x02, 0x18, 0x92, 0xa0, 0x99, 0xdf, 0x19, + 0xe8, 0x95, 0x39, 0xfe, 0x27, 0x3a, 0xb8, 0x87, 0x56, 0xbc, 0x84, 0xa9, 0x5f, 0xcc, 0xb7, 0x17, + 0x33, 0x3b, 0x27, 0x15, 0xf9, 0x6b, 0x9d, 0xbe, 0xca, 0x29, 0xb4, 0xf9, 0xb3, 0x81, 0x5e, 0x9a, + 0xf1, 0x57, 0x5d, 0x1d, 0xb0, 0xf3, 0x21, 0x79, 0x95, 0xc2, 0xd5, 0x01, 0xab, 0x5b, 0x71, 0xf0, + 0x01, 0xaa, 0xaa, 0xa3, 0xc5, 0x0d, 0x7d, 0x9d, 0x40, 0x3b, 0x4d, 0x60, 0x4b, 0xd3, 0x9f, 0x9e, + 0x35, 0x5f, 0x9d, 0xbd, 0xe4, 0xac, 0x94, 0x4d, 0x32, 0x00, 0x18, 0x45, 0xc6, 0x79, 0xc8, 0xf5, + 0xb4, 0xaa, 0x51, 0x7c, 0x1f, 0x08, 0x24, 0xa1, 0x9b, 0xbf, 0xe4, 0x4d, 0x0a, 0x07, 0x05, 0xf8, + 0x07, 0xc5, 0x99, 0xbe, 0x8a, 0xa0, 0x74, 0x44, 0x71, 0x70, 0x8c, 0xd6, 0xbd, 0xa9, 0x0b, 0xe4, + 0x72, 0x3b, 0x39, 0x53, 0x73, 0xea, 0x1a, 0x7e, 0x7d, 0x9a, 0x43, 0x66, 0x4c, 0x98, 0x0c, 0xcd, + 0x48, 0xc1, 0x93, 0x30, 0x90, 0x32, 0xd2, 0xd3, 0x74, 0x6f, 0xf1, 0xbb, 0x27, 0x77, 0xa1, 0xaa, + 0xa2, 0xeb, 0x74, 0x5a, 0x44, 0x41, 0x99, 0x7f, 0x94, 0xb2, 0x7c, 0xa8, 0x45, 0xf3, 0x6e, 0x16, + 0xad, 0xda, 0x01, 0xea, 0x99, 0x4f, 0xd6, 0xda, 0x8d, 0x82, 0xe3, 0x19, 0x8f, 0xcc, 0x48, 0xe3, + 0x4e, 0x7e, 0x0f, 0x1a, 0xff, 0xe7, 0x1e, 0xac, 0xcd, 0xbb, 0x05, 0xf1, 0x1e, 0x2a, 0x4b, 0x3f, + 0x1d, 0xf6, 0xd7, 0x17, 0x43, 0xec, 0x1c, 0xb6, 0x9d, 0x9a, 0x4e, 0x79, 0xb9, 0x73, 0xd8, 0x26, + 0x00, 0x81, 0x8f, 0x51, 0x85, 0xc7, 0x3e, 0x83, 0x5b, 0xa9, 0xbc, 0xf8, 0xed, 0x05, 0x19, 0xcc, + 0x87, 0x0f, 0xbe, 0x04, 0x49, 0x70, 0xcc, 0xef, 0x0d, 0x74, 0x75, 0xe2, 0xa2, 0xc2, 0x1c, 0xad, + 0xf9, 0x85, 0xd9, 0xd1, 0x79, 0x78, 0x70, 0xf9, 0xa9, 0xd3, 0x43, 0x7f, 0x43, 0xdb, 0x5d, 0x2b, + 0xf2, 0xc8, 0x84, 0x0d, 0x93, 0x22, 0x94, 0x87, 0x0d, 0x73, 0x00, 0xcd, 0x9b, 0x0c, 0xbc, 0x9e, + 0x03, 0xe8, 0x69, 0x41, 0x12, 0x3a, 0x3c, 0x28, 0x82, 0xb9, 0x9c, 0xc9, 0xa3, 0x7c, 0x71, 0x65, + 0x0f, 0x4a, 0x3b, 0xe3, 0x90, 0x82, 0x94, 0xb3, 0x7b, 0x7a, 0xde, 0x58, 0x7a, 0x7c, 0xde, 0x58, + 0x7a, 0x72, 0xde, 0x58, 0xfa, 0x76, 0xdc, 0x30, 0x4e, 0xc7, 0x0d, 0xe3, 0xf1, 0xb8, 0x61, 0x3c, + 0x19, 0x37, 0x8c, 0x7f, 0xc6, 0x0d, 0xe3, 0xa7, 0x7f, 0x1b, 0x4b, 0x5f, 0xdc, 0x7c, 0xe6, 0x3f, + 0x7c, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x08, 0x04, 0x22, 0x31, 0x29, 0x0e, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -945,6 +1043,128 @@ func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1320,6 +1540,56 @@ func (m *IngressList) Size() (n int) { return n } +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressPortStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = 
len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *IngressRule) Size() (n int) { if m == nil { return 0 @@ -1528,6 +1798,50 @@ func (this *IngressList) String() string { }, "") return s } +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *IngressRule) String() string { if this == nil { return "nil" @@ -1577,7 +1891,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2814,6 +3128,372 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } return nil } +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 439c89050..46bb7f66f 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. 
Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,7 +68,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. repeated HTTPIngressPath paths = 1; } @@ -82,12 +82,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,15 +95,15 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -121,7 +121,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -133,30 +133,30 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. 
repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +165,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -187,15 +187,62 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // ip is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // ingress is a list containing ingress points for the load-balancer. + // +optional + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // port is the port number of the ingress port. + optional int32 port = 1; + + // protocol is the protocol of the ingress port. 
+ // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -208,7 +255,7 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -240,7 +287,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -253,44 +300,44 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. 
// +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 1bfdcd091..87cc91654 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. 
The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,22 +84,22 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` @@ -106,13 +108,14 @@ type IngressSpec struct { // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,18 +125,65 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // ingress is a list containing ingress points for the load-balancer. 
// +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // ip is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -146,7 +196,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -157,6 +207,7 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. 
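To illustrate the hunks above (this sketch is not part of the patch): IngressStatus.LoadBalancer now carries the networking-local IngressLoadBalancerStatus rather than core/v1 LoadBalancerStatus. A minimal construction of the new types, using only field names from the hunks; the IP, ports, and error value are placeholders, and the import aliases are assumptions.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	errMsg := "lb.example.com/ListenerNotReady" // placeholder provider error value
	status := networkingv1beta1.IngressStatus{
		// LoadBalancer is the new networking-local type, replacing
		// core/v1 LoadBalancerStatus in this field.
		LoadBalancer: networkingv1beta1.IngressLoadBalancerStatus{
			Ingress: []networkingv1beta1.IngressLoadBalancerIngress{{
				IP: "203.0.113.10",
				Ports: []networkingv1beta1.IngressPortStatus{
					{Port: 443, Protocol: corev1.ProtocolTCP},
					{Port: 80, Protocol: corev1.ProtocolTCP, Error: &errMsg},
				},
			}},
		},
	}
	fmt.Printf("%+v\n", status)
}
```

The shape mirrors the old core/v1 status, so callers mostly swap the type; the per-port status with an optional error string is the new information exposed here.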
// If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the @@ -187,7 +238,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -226,14 +277,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -250,22 +301,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -286,12 +337,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. 
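A sketch of an IngressSpec wired together from the fields in the hunks above (ingressClassName, tls, rules, pathType, and the v1beta1 serviceName/servicePort backend); not part of the patch. Host, service, and class names are placeholders, and PathTypePrefix plus IngressRuleValue.HTTP come from the same package even though they are not shown in these hunks.

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	className := "example-class"
	pathType := networkingv1beta1.PathTypePrefix
	spec := networkingv1beta1.IngressSpec{
		// ingressClassName replaces the deprecated kubernetes.io/ingress.class annotation.
		IngressClassName: &className,
		TLS: []networkingv1beta1.IngressTLS{{
			Hosts:      []string{"foo.bar.com"},
			SecretName: "foo-bar-tls",
		}},
		Rules: []networkingv1beta1.IngressRule{{
			Host: "foo.bar.com",
			IngressRuleValue: networkingv1beta1.IngressRuleValue{
				HTTP: &networkingv1beta1.HTTPIngressRuleValue{
					Paths: []networkingv1beta1.HTTPIngressPath{{
						Path:     "/app",
						PathType: &pathType,
						// v1beta1 still uses serviceName/servicePort rather than
						// the v1 service-typed backend.
						Backend: networkingv1beta1.IngressBackend{
							ServiceName: "app-svc",
							ServicePort: intstr.FromInt(8080),
						},
					}},
				},
			},
		}},
	}
	fmt.Printf("%+v\n", spec)
}
```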
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -299,15 +351,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -318,7 +370,7 @@ const ( // IngressClassParametersReferenceScopeNamespace indicates that the // referenced Parameters resource is namespace-scoped. IngressClassParametersReferenceScopeNamespace = "Namespace" - // IngressClassParametersReferenceScopeNamespace indicates that the + // IngressClassParametersReferenceScopeCluster indicates that the // referenced Parameters resource is cluster-scoped. IngressClassParametersReferenceScopeCluster = "Cluster" ) @@ -326,19 +378,23 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -357,6 +413,6 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. 
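Tying together the IngressClassSpec and IngressClassParametersReference hunks above, a minimal sketch (not part of the patch): a namespace-scoped parameters reference must also set namespace, per the field comments. The controller name, API group, and object names are placeholders.

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "k8s.example.com" // placeholder; empty means the core API group
	scope := networkingv1beta1.IngressClassParametersReferenceScopeNamespace
	namespace := "ingress-system"
	class := networkingv1beta1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-class"},
		Spec: networkingv1beta1.IngressClassSpec{
			// Domain-prefixed controller name, per the controller field comment above.
			Controller: "acme.io/ingress-controller",
			Parameters: &networkingv1beta1.IngressClassParametersReference{
				APIGroup: &apiGroup,
				Kind:     "IngressParameters",
				Name:     "external",
				// Scope "Namespace" requires Namespace to be set; it must be
				// unset when Scope is "Cluster".
				Scope:     &scope,
				Namespace: &namespace,
			},
		},
	}
	fmt.Printf("%+v\n", class)
}
```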
Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 79c42a6ba..b2373669f 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. 
Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -50,8 +50,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +82,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +91,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +104,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. 
For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,16 +115,47 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. 
If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -141,10 +172,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -152,8 +183,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -162,8 +193,8 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. 
Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 77259e368..005d64e7f 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -266,6 +266,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. +func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 294be85b6..0152d5e3a 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. 
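As an aside to the generated deepcopy functions above (not part of the patch): a short sketch showing that DeepCopy of the new status types duplicates the nested Ports slice and the *string error, so mutating the copy leaves the original untouched. The addresses and error values are placeholders.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	origErr := "lb.example.com/ListenerNotReady" // placeholder error value
	orig := &networkingv1beta1.IngressLoadBalancerIngress{
		IP: "203.0.113.10",
		Ports: []networkingv1beta1.IngressPortStatus{
			{Port: 443, Protocol: corev1.ProtocolTCP, Error: &origErr},
		},
	}

	// DeepCopyInto allocates a fresh Ports slice and a fresh *string for Error,
	// so the copy can be modified independently of the original.
	cp := orig.DeepCopy()
	newErr := "lb.example.com/QuotaExceeded"
	cp.Ports[0].Error = &newErr

	fmt.Println(*orig.Ports[0].Error) // still the original value
	fmt.Println(*cp.Ports[0].Error)   // the mutated copy
}
```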
// +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,13 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index 984696d98..b00f58772 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -34,11 +34,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,13 +51,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -66,7 +67,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. 
+ // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -96,11 +97,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index a9eddc60e..f5e6b3277 100644 --- a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index d46e0ec6a..4673e9261 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -61,7 +61,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. 
It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,17 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 588c8e4c0..bf9e284bf 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,17 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. 
For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -76,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -106,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 96413754f..ccc1b7085 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 8ffad6973..54dbc0995 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,17 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index b924cb421..74ecca26a 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. 
It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,17 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -100,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index fec4398b2..086105ecc 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
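For context on the node/v1beta1 API shown above, here is a minimal sketch (not part of this patch) of how a client might build a RuntimeClass with the vendored types: the handler names a lowercase, RFC 1123 label understood by the node's CRI, and Overhead attaches a fixed per-pod resource cost. The "kata" handler name and the quantities below are purely hypothetical and depend on the node and CRI configuration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative RuntimeClass object using the vendored v1beta1 types.
	rc := nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"}, // object name; typically matches the handler
		Handler:    "kata",                          // lowercase, DNS Label (RFC 1123), immutable
		Overhead: &nodev1beta1.Overhead{
			// Fixed per-pod overhead charged in addition to container requests.
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
		// Scheduling left nil: the RuntimeClass is assumed supported by all nodes.
	}
	fmt.Printf("runtime class %q uses handler %q\n", rc.Name, rc.Handler)
}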
var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/generated.pb.go b/vendor/k8s.io/api/policy/v1/generated.pb.go index cba6ca4bb..d7e467a92 100644 --- a/vendor/k8s.io/api/policy/v1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1/generated.pb.go @@ -201,58 +201,61 @@ func init() { } var fileDescriptor_2d50488813b2d18e = []byte{ - // 808 bytes of a gzipped FileDescriptorProto + // 854 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0xc7, 0xe3, 0xcd, 0x66, 0x59, 0xa6, 0x49, 0xb4, 0x0c, 0x0b, 0x2c, 0x39, 0x38, 0x28, 0xa7, - 0x82, 0xd4, 0x31, 0xdb, 0x22, 0xb4, 0xaa, 0x04, 0xa2, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xd5, 0x2c, - 0x08, 0x09, 0x81, 0xc4, 0xc4, 0x7e, 0xcd, 0x0e, 0xb1, 0x3d, 0xd6, 0xcc, 0x38, 0x34, 0x27, 0xf8, - 0x13, 0xf8, 0x17, 0xf8, 0x53, 0x38, 0xb1, 0x27, 0xd4, 0x63, 0xc5, 0x21, 0x62, 0xcd, 0x3f, 0x82, - 0x3c, 0x76, 0x7e, 0x38, 0xf1, 0xaa, 0x59, 0x0e, 0xbd, 0x79, 0xde, 0x7b, 0xdf, 0xcf, 0x9b, 0xf7, - 0xe6, 0xcd, 0xc8, 0xe8, 0x93, 0xf1, 0x89, 0x22, 0x5c, 0x38, 0xe3, 0x64, 0x08, 0x32, 0x02, 0x0d, - 0xca, 0x99, 0x40, 0xe4, 0x0b, 0xe9, 0x14, 0x0e, 0x16, 0x73, 0x27, 0x16, 0x01, 0xf7, 0xa6, 0xce, - 0xe4, 0xd8, 0x19, 0x41, 0x04, 0x92, 0x69, 0xf0, 0x49, 0x2c, 0x85, 0x16, 0xf8, 0x30, 0x8f, 0x22, - 0x2c, 0xe6, 0x24, 0x8f, 0x22, 0x93, 0xe3, 0xce, 0x9d, 0x11, 0xd7, 0x17, 0xc9, 0x90, 0x78, 0x22, - 0x74, 0x46, 0x62, 0x24, 0x1c, 0x13, 0x3c, 0x4c, 0x9e, 0x9a, 0x95, 0x59, 0x98, 0xaf, 0x1c, 0xd2, - 0xf9, 0x68, 0x99, 0x2a, 0x64, 0xde, 0x05, 0x8f, 0x40, 0x4e, 0x9d, 0x78, 0x3c, 0xca, 0x0c, 0xca, - 0x09, 0x41, 0xb3, 0x8a, 0xd4, 0x1d, 0xe7, 0x3a, 0x95, 0x4c, 0x22, 0xcd, 0x43, 0xd8, 0x10, 0x7c, - 0xfc, 0x32, 0x81, 0xf2, 0x2e, 0x20, 0x64, 0x1b, 0xba, 0x7b, 0xd7, 0xe9, 0x12, 0xcd, 0x03, 0x87, - 0x47, 0x5a, 0x69, 0xb9, 0x2e, 0xea, 0xfd, 0x6d, 0xa1, 0xfd, 0xd3, 0x09, 0xf7, 0x34, 0x17, 0x11, - 0xfe, 0x11, 0xed, 0x67, 0x55, 0xf8, 0x4c, 0xb3, 0x23, 0xeb, 0x3d, 0xeb, 0xf6, 0xad, 0xbb, 0x1f, - 0x92, 0x65, 0xe3, 0x16, 0x50, 0x12, 0x8f, 0x47, 0x99, 0x41, 0x91, 0x2c, 0x9a, 0x4c, 0x8e, 0xc9, - 0x60, 0xf8, 0x13, 0x78, 0xfa, 0x0c, 0x34, 0x73, 0xf1, 0xe5, 0xac, 0x5b, 0x4b, 0x67, 0x5d, 0xb4, - 0xb4, 0xd1, 0x05, 0x15, 0x07, 0xa8, 0xe5, 0x43, 0x00, 0x1a, 0x06, 0x71, 0x96, 0x51, 0x1d, 0xed, - 0x98, 0x34, 0xf7, 0xb6, 0x4b, 0xd3, 0x5f, 0x95, 0xba, 0x6f, 0xa4, 0xb3, 0x6e, 0xab, 0x64, 0xa2, - 0x65, 0x78, 0xef, 0xf7, 0x1d, 0xf4, 0xe6, 0x13, 0xe1, 0xf7, 0xb9, 0x92, 0x89, 0x31, 0xb9, 0x89, - 0x3f, 0x02, 0xfd, 0x0a, 0xea, 0x1c, 0xa0, 0x5d, 0x15, 0x83, 0x57, 0x94, 0x77, 0x87, 0x54, 0x8d, - 0x1f, 0xa9, 0xd8, 0xda, 0x79, 0x0c, 0x9e, 0xdb, 0x2c, 0xd0, 0xbb, 0xd9, 0x8a, 0x1a, 0x10, 0xfe, - 0x16, 0xed, 0x29, 0xcd, 0x74, 0xa2, 0x8e, 0xea, 0x06, 0xe9, 0x6c, 0x8f, 0x34, 0x32, 0xb7, 0x5d, - 0x40, 0xf7, 0xf2, 0x35, 0x2d, 0x70, 0xbd, 0x3f, 0x2d, 0xf4, 0x4e, 0x85, 0xea, 0x31, 0x57, 0x1a, - 0x7f, 0xbf, 0xd1, 0x27, 0xb2, 0x5d, 0x9f, 0x32, 0xb5, 0xe9, 0xd2, 0x41, 0x91, 0x75, 0x7f, 0x6e, - 0x59, 0xe9, 0xd1, 0x57, 0xa8, 0xc1, 0x35, 0x84, 0xd9, 0x0c, 0xd4, 0x6f, 0xdf, 0xba, 0xfb, 0xfe, - 0xd6, 0x15, 0xb9, 0xad, 0x82, 0xda, 0x78, 0x94, 0xe9, 0x69, 0x8e, 0xe9, 0xfd, 0xb5, 0x53, 0x59, - 0x49, 0xd6, 0x44, 0xfc, 0x14, 0x35, 0x43, 0x1e, 0x3d, 0x98, 0x30, 0x1e, 0xb0, 0x61, 0x00, 0x2f, - 0x3d, 0xf5, 0xec, 0xca, 0x90, 
0xfc, 0xca, 0x90, 0x47, 0x91, 0x1e, 0xc8, 0x73, 0x2d, 0x79, 0x34, - 0x72, 0x0f, 0xd2, 0x59, 0xb7, 0x79, 0xb6, 0x42, 0xa2, 0x25, 0x2e, 0xfe, 0x01, 0xed, 0x2b, 0x08, - 0xc0, 0xd3, 0x42, 0xde, 0x6c, 0xb4, 0x1f, 0xb3, 0x21, 0x04, 0xe7, 0x85, 0xd4, 0x6d, 0x66, 0x2d, - 0x9b, 0xaf, 0xe8, 0x02, 0x89, 0x03, 0xd4, 0x0e, 0xd9, 0xb3, 0x6f, 0x22, 0xb6, 0x28, 0xa4, 0xfe, - 0x3f, 0x0b, 0xc1, 0xe9, 0xac, 0xdb, 0x3e, 0x2b, 0xb1, 0xe8, 0x1a, 0xbb, 0xf7, 0x47, 0x03, 0xbd, - 0x7b, 0xed, 0x40, 0xe1, 0x2f, 0x11, 0x16, 0x43, 0x05, 0x72, 0x02, 0xfe, 0xe7, 0xf9, 0xa3, 0xc2, - 0x45, 0x64, 0x1a, 0x5b, 0x77, 0x3b, 0xc5, 0x01, 0xe1, 0xc1, 0x46, 0x04, 0xad, 0x50, 0xe1, 0x5f, - 0x50, 0xcb, 0xcf, 0xb3, 0x80, 0xff, 0x44, 0xf8, 0xf3, 0x91, 0x70, 0x6f, 0x38, 0xe4, 0xa4, 0xbf, - 0x0a, 0x39, 0x8d, 0xb4, 0x9c, 0xba, 0x6f, 0x15, 0x5b, 0x69, 0x95, 0x7c, 0xb4, 0x9c, 0x2f, 0x2b, - 0xc6, 0x5f, 0x20, 0xd5, 0x83, 0x20, 0x10, 0x3f, 0x83, 0x6f, 0x9a, 0xdb, 0x58, 0x16, 0xd3, 0xdf, - 0x88, 0xa0, 0x15, 0x2a, 0xfc, 0x29, 0x6a, 0x7b, 0x89, 0x94, 0x10, 0xe9, 0x2f, 0x80, 0x05, 0xfa, - 0x62, 0x7a, 0xb4, 0x6b, 0x38, 0x6f, 0x17, 0x9c, 0xf6, 0xc3, 0x92, 0x97, 0xae, 0x45, 0x67, 0x7a, - 0x1f, 0x14, 0x97, 0xe0, 0xcf, 0xf5, 0x8d, 0xb2, 0xbe, 0x5f, 0xf2, 0xd2, 0xb5, 0x68, 0x7c, 0x82, - 0x9a, 0xf0, 0x2c, 0x06, 0x6f, 0xde, 0xcb, 0x3d, 0xa3, 0x3e, 0x2c, 0xd4, 0xcd, 0xd3, 0x15, 0x1f, - 0x2d, 0x45, 0x62, 0x0f, 0x21, 0x4f, 0x44, 0x3e, 0xcf, 0x9f, 0xe6, 0xd7, 0xcc, 0x19, 0x38, 0xdb, - 0xcd, 0xef, 0xc3, 0xb9, 0x6e, 0xf9, 0x30, 0x2e, 0x4c, 0x8a, 0xae, 0x60, 0x3b, 0x01, 0xc2, 0x9b, - 0xc7, 0x84, 0x0f, 0x50, 0x7d, 0x0c, 0x53, 0x33, 0x3e, 0xaf, 0xd3, 0xec, 0x13, 0x7f, 0x86, 0x1a, - 0x13, 0x16, 0x24, 0x50, 0xdc, 0xa3, 0x0f, 0xb6, 0xdb, 0xc7, 0xd7, 0x3c, 0x04, 0x9a, 0x0b, 0xef, - 0xef, 0x9c, 0x58, 0xee, 0xfd, 0xcb, 0x2b, 0xbb, 0xf6, 0xfc, 0xca, 0xae, 0xbd, 0xb8, 0xb2, 0x6b, - 0xbf, 0xa6, 0xb6, 0x75, 0x99, 0xda, 0xd6, 0xf3, 0xd4, 0xb6, 0x5e, 0xa4, 0xb6, 0xf5, 0x4f, 0x6a, - 0x5b, 0xbf, 0xfd, 0x6b, 0xd7, 0xbe, 0x3b, 0xac, 0xfa, 0x89, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, - 0x8d, 0x9b, 0x69, 0xee, 0x74, 0x08, 0x00, 0x00, + 0x14, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x4e, 0x93, 0x68, 0x19, 0x16, 0x58, 0x72, 0x70, 0xaa, 0x9c, + 0x16, 0xa4, 0x8e, 0xd9, 0x16, 0xa1, 0x55, 0x25, 0x50, 0xeb, 0x66, 0x05, 0x45, 0x5d, 0xb2, 0x9a, + 0x6d, 0x85, 0x84, 0x40, 0x62, 0x62, 0xbf, 0x26, 0x43, 0x6c, 0x8f, 0xe5, 0x19, 0x87, 0xe6, 0x44, + 0xff, 0x04, 0xfe, 0x05, 0xfe, 0x14, 0x4e, 0xec, 0xb1, 0xdc, 0x2a, 0x0e, 0x11, 0x6b, 0xfe, 0x0b, + 0x4e, 0xc8, 0x63, 0xe7, 0x87, 0x37, 0x0e, 0xcd, 0x72, 0xe8, 0xcd, 0xf3, 0xde, 0xfb, 0x7e, 0x9e, + 0xdf, 0x8f, 0x71, 0x82, 0x3e, 0x1b, 0x1d, 0x4b, 0xc2, 0x85, 0x35, 0x8a, 0xfb, 0x10, 0x05, 0xa0, + 0x40, 0x5a, 0x63, 0x08, 0x5c, 0x11, 0x59, 0xb9, 0x83, 0x85, 0xdc, 0x0a, 0x85, 0xc7, 0x9d, 0x89, + 0x35, 0x3e, 0xb2, 0x06, 0x10, 0x40, 0xc4, 0x14, 0xb8, 0x24, 0x8c, 0x84, 0x12, 0x78, 0x3f, 0x8b, + 0x22, 0x2c, 0xe4, 0x24, 0x8b, 0x22, 0xe3, 0xa3, 0xd6, 0xed, 0x01, 0x57, 0xc3, 0xb8, 0x4f, 0x1c, + 0xe1, 0x5b, 0x03, 0x31, 0x10, 0x96, 0x0e, 0xee, 0xc7, 0xcf, 0xf4, 0x49, 0x1f, 0xf4, 0x53, 0x06, + 0x69, 0x7d, 0xb2, 0x48, 0xe5, 0x33, 0x67, 0xc8, 0x03, 0x88, 0x26, 0x56, 0x38, 0x1a, 0xa4, 0x06, + 0x69, 0xf9, 0xa0, 0x58, 0x49, 0xea, 0x96, 0xb5, 0x4e, 0x15, 0xc5, 0x81, 0xe2, 0x3e, 0xac, 0x08, + 0x3e, 0x7d, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xee, 0x3a, 0x5d, 0xac, 0xb8, 0x67, + 0xf1, 0x40, 0x49, 0x15, 0x5d, 0x15, 0x75, 0xfe, 0x34, 0xd0, 0xee, 0xc9, 0x98, 0x3b, 0x8a, 0x8b, + 0x00, 0xff, 0x80, 0x76, 0xd3, 0x2a, 0x5c, 0xa6, 0xd8, 0x81, 0x71, 0xcb, 0x38, 0xbc, 0x79, 0xe7, + 0x63, 
0xb2, 0x68, 0xdc, 0x1c, 0x4a, 0xc2, 0xd1, 0x20, 0x35, 0x48, 0x92, 0x46, 0x93, 0xf1, 0x11, + 0xe9, 0xf5, 0x7f, 0x04, 0x47, 0x9d, 0x82, 0x62, 0x36, 0xbe, 0x98, 0xb6, 0x2b, 0xc9, 0xb4, 0x8d, + 0x16, 0x36, 0x3a, 0xa7, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0x41, 0x2f, 0x4c, 0x33, 0xca, 0x83, + 0x2d, 0x9d, 0xe6, 0xee, 0x66, 0x69, 0xba, 0xcb, 0x52, 0xfb, 0xed, 0x64, 0xda, 0x6e, 0x14, 0x4c, + 0xb4, 0x08, 0xef, 0xfc, 0xba, 0x85, 0xde, 0x39, 0x13, 0x6e, 0x97, 0xcb, 0x28, 0xd6, 0x26, 0x3b, + 0x76, 0x07, 0xa0, 0xde, 0x40, 0x9d, 0x3d, 0xb4, 0x2d, 0x43, 0x70, 0xf2, 0xf2, 0x6e, 0x93, 0xb2, + 0xf5, 0x23, 0x25, 0xaf, 0x76, 0x1e, 0x82, 0x63, 0xd7, 0x73, 0xf4, 0x76, 0x7a, 0xa2, 0x1a, 0x84, + 0xbf, 0x41, 0x3b, 0x52, 0x31, 0x15, 0xcb, 0x83, 0xaa, 0x46, 0x5a, 0x9b, 0x23, 0xb5, 0xcc, 0x6e, + 0xe6, 0xd0, 0x9d, 0xec, 0x4c, 0x73, 0x5c, 0xe7, 0x77, 0x03, 0xbd, 0x5f, 0xa2, 0x7a, 0xcc, 0xa5, + 0xc2, 0xdf, 0xad, 0xf4, 0x89, 0x6c, 0xd6, 0xa7, 0x54, 0xad, 0xbb, 0xb4, 0x97, 0x67, 0xdd, 0x9d, + 0x59, 0x96, 0x7a, 0xf4, 0x35, 0xaa, 0x71, 0x05, 0x7e, 0xba, 0x03, 0xd5, 0xc3, 0x9b, 0x77, 0x3e, + 0xdc, 0xb8, 0x22, 0xbb, 0x91, 0x53, 0x6b, 0x8f, 0x52, 0x3d, 0xcd, 0x30, 0x9d, 0x3f, 0xaa, 0xa5, + 0x95, 0xa4, 0x4d, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0x83, 0x31, 0xe3, 0x1e, 0xeb, 0x7b, 0xf0, + 0xda, 0xa9, 0xa7, 0x57, 0x86, 0x64, 0x57, 0x86, 0x3c, 0x0a, 0x54, 0x2f, 0x3a, 0x57, 0x11, 0x0f, + 0x06, 0xf6, 0x5e, 0x32, 0x6d, 0xd7, 0x4f, 0x97, 0x48, 0xb4, 0xc0, 0xc5, 0xdf, 0xa3, 0x5d, 0x09, + 0x1e, 0x38, 0x4a, 0x44, 0xd7, 0x5b, 0xed, 0xc7, 0xac, 0x0f, 0xde, 0x79, 0x2e, 0xb5, 0xeb, 0x69, + 0xcb, 0x66, 0x27, 0x3a, 0x47, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x69, 0xc0, 0xe6, 0x85, 0x54, + 0xff, 0x67, 0x21, 0x38, 0x99, 0xb6, 0x9b, 0xa7, 0x05, 0x16, 0xbd, 0xc2, 0xc6, 0x2f, 0x0c, 0xd4, + 0x8a, 0x83, 0x21, 0x30, 0x4f, 0x0d, 0x27, 0x67, 0xc2, 0x9d, 0x7d, 0x27, 0xce, 0xf4, 0x70, 0x0e, + 0xb6, 0x6f, 0x19, 0x87, 0x37, 0xec, 0xfb, 0xc9, 0xb4, 0xdd, 0x7a, 0xba, 0x36, 0xea, 0x9f, 0x69, + 0xdb, 0x5c, 0xef, 0x7d, 0x32, 0x09, 0x81, 0xfe, 0x47, 0x8e, 0xce, 0x6f, 0x35, 0xf4, 0xc1, 0xda, + 0x9d, 0xc6, 0x5f, 0x21, 0x2c, 0xfa, 0x12, 0xa2, 0x31, 0xb8, 0x5f, 0x64, 0xdf, 0x35, 0x2e, 0x02, + 0x3d, 0xdb, 0xaa, 0xdd, 0xca, 0x77, 0x04, 0xf7, 0x56, 0x22, 0x68, 0x89, 0x0a, 0xff, 0x8c, 0x1a, + 0x6e, 0x96, 0x05, 0xdc, 0x33, 0xe1, 0xce, 0xb6, 0xd2, 0xbe, 0xe6, 0x3d, 0x23, 0xdd, 0x65, 0xc8, + 0x49, 0xa0, 0xa2, 0x89, 0xfd, 0x6e, 0xfe, 0x2a, 0x8d, 0x82, 0x8f, 0x16, 0xf3, 0xa5, 0xc5, 0xb8, + 0x73, 0xa4, 0x7c, 0xe0, 0x79, 0xe2, 0x27, 0x70, 0xf5, 0x7c, 0x6b, 0x8b, 0x62, 0xba, 0x2b, 0x11, + 0xb4, 0x44, 0x85, 0x3f, 0x47, 0x4d, 0x27, 0x8e, 0x22, 0x08, 0xd4, 0x97, 0x59, 0x67, 0xf5, 0xb0, + 0x6a, 0xf6, 0x7b, 0x39, 0xa7, 0xf9, 0xb0, 0xe0, 0xa5, 0x57, 0xa2, 0x53, 0xbd, 0x0b, 0x92, 0x47, + 0xe0, 0xce, 0xf4, 0xb5, 0xa2, 0xbe, 0x5b, 0xf0, 0xd2, 0x2b, 0xd1, 0xf8, 0x18, 0xd5, 0xe1, 0x79, + 0x08, 0xce, 0xac, 0x97, 0x3b, 0x5a, 0xbd, 0x9f, 0xab, 0xeb, 0x27, 0x4b, 0x3e, 0x5a, 0x88, 0xc4, + 0x0e, 0x42, 0x8e, 0x08, 0x5c, 0x9e, 0xfd, 0x3a, 0xbc, 0xa5, 0x67, 0x60, 0x6d, 0x76, 0x85, 0x1e, + 0xce, 0x74, 0x8b, 0x6f, 0xf3, 0xdc, 0x24, 0xe9, 0x12, 0xb6, 0xe5, 0x21, 0xbc, 0x3a, 0x26, 0xbc, + 0x87, 0xaa, 0x23, 0x98, 0xe8, 0xf5, 0xb9, 0x41, 0xd3, 0x47, 0x7c, 0x1f, 0xd5, 0xc6, 0xcc, 0x8b, + 0x21, 0xbf, 0xca, 0x1f, 0x6d, 0xf6, 0x1e, 0x4f, 0xb8, 0x0f, 0x34, 0x13, 0xde, 0xdb, 0x3a, 0x36, + 0xec, 0x7b, 0x17, 0x97, 0x66, 0xe5, 0xe5, 0xa5, 0x59, 0x79, 0x75, 0x69, 0x56, 0x5e, 0x24, 0xa6, + 0x71, 0x91, 0x98, 0xc6, 0xcb, 0xc4, 0x34, 0x5e, 0x25, 0xa6, 0xf1, 0x57, 0x62, 0x1a, 0xbf, 0xfc, + 0x6d, 0x56, 0xbe, 0xdd, 0x2f, 
0xfb, 0x1f, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xd7, + 0x99, 0xdb, 0xf7, 0x08, 0x00, 0x00, } func (m *Eviction) Marshal() (dAtA []byte, err error) { @@ -420,6 +423,13 @@ func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.UnhealthyPodEvictionPolicy != nil { + i -= len(*m.UnhealthyPodEvictionPolicy) + copy(dAtA[i:], *m.UnhealthyPodEvictionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UnhealthyPodEvictionPolicy))) + i-- + dAtA[i] = 0x22 + } if m.MaxUnavailable != nil { { size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) @@ -616,6 +626,10 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -701,6 +715,7 @@ func (this *PodDisruptionBudgetSpec) String() string { `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, `}`, }, "") return s @@ -1266,6 +1281,39 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index 33bc72b86..a79e71028 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -92,6 +92,34 @@ message PodDisruptionBudgetSpec { // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). 
+ // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + optional string unhealthyPodEvictionPolicy = 4; } // PodDisruptionBudgetStatus represents information about the status of a diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 4a03696f0..45b9550f4 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -47,8 +47,56 @@ type PodDisruptionBudgetSpec struct { // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } +// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods +// should be considered for eviction. +// +enum +type UnhealthyPodEvictionPolicyType string + +const ( + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. 
+ IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget" + + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow" +) + // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 3208392e8..799b0794a 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ @@ -59,10 +59,11 @@ func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { } var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", - "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. 
Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go index 485e1c938..74b4f3a3a 100644 --- a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go @@ -137,6 +137,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { *out = new(intstr.IntOrString) **out = **in } + if in.UnhealthyPodEvictionPolicy != nil { + in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy + *out = new(UnhealthyPodEvictionPolicyType) + **out = **in + } return } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index d19c93b10..0b75d6415 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -609,127 +609,129 @@ func init() { } var fileDescriptor_014060e454a820dc = []byte{ - // 1907 bytes of a gzipped FileDescriptorProto + // 1946 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x5b, 0x73, 0xdb, 0xc6, - 0xf5, 0x17, 0x4c, 0x5d, 0xa8, 0xd5, 0xc5, 0xe2, 0xea, 0x62, 0x48, 0xf9, 0x87, 0x70, 0x90, 0x99, - 0xff, 0xb8, 0x69, 0x0a, 0xc6, 0xb2, 0xe3, 0x7a, 0x9a, 0x5e, 0x22, 0x88, 0x92, 0xad, 0x8c, 0x65, - 0xb1, 0x4b, 0x3b, 0xd3, 0x76, 0xdc, 0x4e, 0x97, 0xc0, 0x8a, 0x42, 0x04, 0x02, 0x28, 0x76, 0xc1, - 0x88, 0x6f, 0x7d, 0xe8, 0x43, 0x1f, 0xfb, 0x05, 0x32, 0xfd, 0x00, 0x9d, 0x3e, 0xf5, 0x43, 0xd4, - 0x99, 0xe9, 0x64, 0xf2, 0x98, 0xe9, 0x03, 0xa7, 0x66, 0xbf, 0x85, 0x9f, 0x3a, 0x58, 0x2e, 0x40, - 0x5c, 0x49, 0x3b, 0x33, 0xf6, 0x1b, 0xb1, 0xe7, 0xf7, 0xfb, 0x9d, 0xdd, 0xb3, 0x67, 0xcf, 0x5e, - 0x08, 0xf4, 0xcb, 0xfb, 0x54, 0xb3, 0xdc, 0xc6, 0x65, 0xd0, 0x21, 0xbe, 0x43, 0x18, 0xa1, 0x8d, - 0x3e, 0x71, 0x4c, 0xd7, 0x6f, 0x08, 0x03, 0xf6, 0xac, 0x86, 0xe7, 0xda, 0x96, 0x31, 0x68, 0xf4, - 0x6f, 0x77, 0x08, 0xc3, 0xb7, 0x1b, 0x5d, 0xe2, 0x10, 0x1f, 0x33, 0x62, 0x6a, 0x9e, 0xef, 0x32, - 0x17, 0xee, 0x8e, 0xa1, 0x1a, 0xf6, 0x2c, 0x6d, 0x0c, 0xd5, 0x04, 0x74, 0xef, 0x47, 0x5d, 0x8b, - 0x5d, 0x04, 0x1d, 0xcd, 0x70, 0x7b, 0x8d, 0xae, 0xdb, 0x75, 0x1b, 0x9c, 0xd1, 0x09, 0xce, 
0xf9, - 0x17, 0xff, 0xe0, 0xbf, 0xc6, 0x4a, 0x7b, 0x6a, 0xc2, 0xa9, 0xe1, 0xfa, 0xa4, 0xd1, 0xcf, 0x79, - 0xdb, 0xbb, 0x3b, 0xc1, 0xf4, 0xb0, 0x71, 0x61, 0x39, 0xc4, 0x1f, 0x34, 0xbc, 0xcb, 0x6e, 0xd8, - 0x40, 0x1b, 0x3d, 0xc2, 0x70, 0x11, 0xab, 0x51, 0xc6, 0xf2, 0x03, 0x87, 0x59, 0x3d, 0x92, 0x23, - 0xdc, 0x9b, 0x45, 0xa0, 0xc6, 0x05, 0xe9, 0xe1, 0x1c, 0xef, 0x4e, 0x19, 0x2f, 0x60, 0x96, 0xdd, - 0xb0, 0x1c, 0x46, 0x99, 0x9f, 0x25, 0xa9, 0x77, 0xc1, 0xc6, 0x81, 0x6d, 0xbb, 0x5f, 0x12, 0xf3, - 0xb0, 0x7d, 0xd2, 0xf4, 0xad, 0x3e, 0xf1, 0xe1, 0x4d, 0x30, 0xef, 0xe0, 0x1e, 0x91, 0xa5, 0x9b, - 0xd2, 0xad, 0x65, 0x7d, 0xf5, 0xf9, 0x50, 0x99, 0x1b, 0x0d, 0x95, 0xf9, 0xc7, 0xb8, 0x47, 0x10, - 0xb7, 0xa8, 0x9f, 0x80, 0x9a, 0x60, 0x1d, 0xdb, 0xe4, 0xea, 0x73, 0xd7, 0x0e, 0x7a, 0x04, 0xfe, - 0x3f, 0x58, 0x34, 0xb9, 0x80, 0x20, 0xae, 0x0b, 0xe2, 0xe2, 0x58, 0x16, 0x09, 0xab, 0x4a, 0xc1, - 0x75, 0x41, 0x7e, 0xe8, 0x52, 0xd6, 0xc2, 0xec, 0x02, 0xee, 0x03, 0xe0, 0x61, 0x76, 0xd1, 0xf2, - 0xc9, 0xb9, 0x75, 0x25, 0xe8, 0x50, 0xd0, 0x41, 0x2b, 0xb6, 0xa0, 0x04, 0x0a, 0x7e, 0x08, 0xaa, - 0x3e, 0xc1, 0xe6, 0x99, 0x63, 0x0f, 0xe4, 0x6b, 0x37, 0xa5, 0x5b, 0x55, 0x7d, 0x43, 0x30, 0xaa, - 0x48, 0xb4, 0xa3, 0x18, 0xa1, 0xfe, 0x5b, 0x02, 0xd5, 0xa3, 0xbe, 0x65, 0x30, 0xcb, 0x75, 0xe0, - 0xef, 0x41, 0x35, 0x9c, 0x2d, 0x13, 0x33, 0xcc, 0x9d, 0xad, 0xec, 0x7f, 0xa4, 0x4d, 0x32, 0x29, - 0x0e, 0x9e, 0xe6, 0x5d, 0x76, 0xc3, 0x06, 0xaa, 0x85, 0x68, 0xad, 0x7f, 0x5b, 0x3b, 0xeb, 0x7c, - 0x41, 0x0c, 0x76, 0x4a, 0x18, 0x9e, 0x74, 0x6f, 0xd2, 0x86, 0x62, 0x55, 0x68, 0x83, 0x35, 0x93, - 0xd8, 0x84, 0x91, 0x33, 0x2f, 0xf4, 0x48, 0x79, 0x0f, 0x57, 0xf6, 0xef, 0xbc, 0x9a, 0x9b, 0x66, - 0x92, 0xaa, 0xd7, 0x46, 0x43, 0x65, 0x2d, 0xd5, 0x84, 0xd2, 0xe2, 0xea, 0x57, 0x12, 0xd8, 0x39, - 0x6e, 0x3f, 0xf0, 0xdd, 0xc0, 0x6b, 0xb3, 0x70, 0x76, 0xbb, 0x03, 0x61, 0x82, 0x3f, 0x06, 0xf3, - 0x7e, 0x60, 0x47, 0x73, 0xf9, 0x7e, 0x34, 0x97, 0x28, 0xb0, 0xc9, 0xcb, 0xa1, 0xb2, 0x99, 0x61, - 0x3d, 0x19, 0x78, 0x04, 0x71, 0x02, 0xfc, 0x0c, 0x2c, 0xfa, 0xd8, 0xe9, 0x92, 0xb0, 0xeb, 0x95, - 0x5b, 0x2b, 0xfb, 0xaa, 0x56, 0xba, 0xd6, 0xb4, 0x93, 0x26, 0x0a, 0xa1, 0x93, 0x19, 0xe7, 0x9f, - 0x14, 0x09, 0x05, 0xf5, 0x14, 0xac, 0xf1, 0xa9, 0x76, 0x7d, 0xc6, 0x2d, 0xf0, 0x5d, 0x50, 0xe9, - 0x59, 0x0e, 0xef, 0xd4, 0x82, 0xbe, 0x22, 0x58, 0x95, 0x53, 0xcb, 0x41, 0x61, 0x3b, 0x37, 0xe3, - 0x2b, 0x1e, 0xb3, 0xa4, 0x19, 0x5f, 0xa1, 0xb0, 0x5d, 0x7d, 0x00, 0x96, 0x84, 0xc7, 0xa4, 0x50, - 0x65, 0xba, 0x50, 0xa5, 0x40, 0xe8, 0x6f, 0xd7, 0xc0, 0x66, 0xcb, 0x35, 0x9b, 0x16, 0xf5, 0x03, - 0x1e, 0x2f, 0x3d, 0x30, 0xbb, 0x84, 0xbd, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0xa9, 0x47, 0x0c, 0x91, - 0x16, 0xfb, 0x53, 0x62, 0x5b, 0xd0, 0xbf, 0xb6, 0x47, 0x8c, 0xc9, 0xb2, 0x0c, 0xbf, 0x10, 0x57, - 0x83, 0xcf, 0xc0, 0x22, 0x65, 0x98, 0x05, 0x54, 0xae, 0x70, 0xdd, 0xbb, 0xaf, 0xa9, 0xcb, 0xb9, - 0x93, 0x59, 0x1c, 0x7f, 0x23, 0xa1, 0xa9, 0xfe, 0x4b, 0x02, 0x37, 0x0a, 0x58, 0x8f, 0x2c, 0xca, - 0xe0, 0xb3, 0x5c, 0xc4, 0xb4, 0x57, 0x8b, 0x58, 0xc8, 0xe6, 0xf1, 0x8a, 0x17, 0x6f, 0xd4, 0x92, - 0x88, 0x56, 0x1b, 0x2c, 0x58, 0x8c, 0xf4, 0xa2, 0x54, 0xd4, 0x5e, 0x6f, 0x58, 0xfa, 0x9a, 0x90, - 0x5e, 0x38, 0x09, 0x45, 0xd0, 0x58, 0x4b, 0xfd, 0xe6, 0x5a, 0xe1, 0x70, 0xc2, 0x70, 0xc2, 0x73, - 0xb0, 0xda, 0xb3, 0x9c, 0x83, 0x3e, 0xb6, 0x6c, 0xdc, 0x11, 0xab, 0x67, 0x5a, 0x12, 0x84, 0x15, - 0x56, 0x1b, 0x57, 0x58, 0xed, 0xc4, 0x61, 0x67, 0x7e, 0x9b, 0xf9, 0x96, 0xd3, 0xd5, 0x37, 0x46, - 0x43, 0x65, 0xf5, 0x34, 0xa1, 0x84, 0x52, 0xba, 0xf0, 0xb7, 0xa0, 0x4a, 0x89, 0x4d, 0x0c, 0xe6, - 0xfa, 0xaf, 0x57, 
0x21, 0x1e, 0xe1, 0x0e, 0xb1, 0xdb, 0x82, 0xaa, 0xaf, 0x86, 0x71, 0x8b, 0xbe, - 0x50, 0x2c, 0x09, 0x6d, 0xb0, 0xde, 0xc3, 0x57, 0x4f, 0x1d, 0x1c, 0x0f, 0xa4, 0xf2, 0x3d, 0x07, - 0x02, 0x47, 0x43, 0x65, 0xfd, 0x34, 0xa5, 0x85, 0x32, 0xda, 0xea, 0x3f, 0x17, 0xc0, 0x6e, 0x69, - 0x56, 0xc1, 0xcf, 0x00, 0x74, 0x3b, 0x94, 0xf8, 0x7d, 0x62, 0x3e, 0x18, 0xef, 0x41, 0x96, 0x1b, - 0x2d, 0xdc, 0x3d, 0x31, 0x41, 0xf0, 0x2c, 0x87, 0x40, 0x05, 0x2c, 0xf8, 0x27, 0x09, 0xac, 0x99, - 0x63, 0x37, 0xc4, 0x6c, 0xb9, 0x66, 0x94, 0x18, 0x0f, 0xbe, 0x4f, 0xbe, 0x6b, 0xcd, 0xa4, 0xd2, - 0x91, 0xc3, 0xfc, 0x81, 0xbe, 0x2d, 0x3a, 0xb4, 0x96, 0xb2, 0xa1, 0xb4, 0xd3, 0x70, 0x48, 0x66, - 0x2c, 0x49, 0xc5, 0x9e, 0xc6, 0x43, 0xbc, 0x30, 0x19, 0x52, 0x33, 0x87, 0x40, 0x05, 0x2c, 0xf8, - 0x73, 0xb0, 0x6e, 0x04, 0xbe, 0x4f, 0x1c, 0xf6, 0x90, 0x60, 0x9b, 0x5d, 0x0c, 0xe4, 0x79, 0xae, - 0xb3, 0x23, 0x74, 0xd6, 0x0f, 0x53, 0x56, 0x94, 0x41, 0x87, 0x7c, 0x93, 0x50, 0xcb, 0x27, 0x66, - 0xc4, 0x5f, 0x48, 0xf3, 0x9b, 0x29, 0x2b, 0xca, 0xa0, 0xe1, 0x7d, 0xb0, 0x4a, 0xae, 0x3c, 0x62, - 0x44, 0x01, 0x5d, 0xe4, 0xec, 0x2d, 0xc1, 0x5e, 0x3d, 0x4a, 0xd8, 0x50, 0x0a, 0x09, 0x0d, 0x00, - 0x0c, 0xd7, 0x31, 0xad, 0xf1, 0x3e, 0xb7, 0xc4, 0x27, 0xa2, 0xf1, 0x6a, 0x59, 0x7c, 0x18, 0xf1, - 0x26, 0xd5, 0x32, 0x6e, 0xa2, 0x28, 0x21, 0xbb, 0x67, 0x03, 0x98, 0x9f, 0x26, 0xb8, 0x01, 0x2a, - 0x97, 0x64, 0x30, 0xde, 0xdb, 0x50, 0xf8, 0x13, 0x7e, 0x0a, 0x16, 0xfa, 0xd8, 0x0e, 0x88, 0x58, - 0x4d, 0x1f, 0xbc, 0x5a, 0x3f, 0x9e, 0x58, 0x3d, 0x82, 0xc6, 0xc4, 0x9f, 0x5c, 0xbb, 0x2f, 0xa9, - 0x5f, 0x4b, 0xa0, 0xd6, 0x72, 0xcd, 0x36, 0x31, 0x02, 0xdf, 0x62, 0x83, 0x16, 0xcf, 0xa4, 0xb7, - 0xb0, 0x2b, 0xa0, 0xd4, 0xae, 0xf0, 0xd1, 0xf4, 0x6c, 0x4e, 0xf7, 0xae, 0x6c, 0x4f, 0x50, 0x9f, - 0x4b, 0x60, 0x3b, 0x87, 0x7e, 0x0b, 0x35, 0xfb, 0x97, 0xe9, 0x9a, 0xfd, 0xe1, 0xeb, 0x0c, 0xa6, - 0xa4, 0x62, 0x7f, 0x5d, 0x2b, 0x18, 0x0a, 0xaf, 0xd7, 0xe1, 0xf9, 0xd1, 0xb7, 0xfa, 0x96, 0x4d, - 0xba, 0xc4, 0xe4, 0x83, 0xa9, 0x26, 0xce, 0x8f, 0xb1, 0x05, 0x25, 0x50, 0x90, 0x82, 0x1d, 0x93, - 0x9c, 0xe3, 0xc0, 0x66, 0x07, 0xa6, 0x79, 0x88, 0x3d, 0xdc, 0xb1, 0x6c, 0x8b, 0x59, 0xe2, 0xc0, - 0xb3, 0xac, 0x7f, 0x32, 0x1a, 0x2a, 0x3b, 0xcd, 0x42, 0xc4, 0xcb, 0xa1, 0xf2, 0x6e, 0xfe, 0xbe, - 0xa0, 0xc5, 0x90, 0x01, 0x2a, 0x91, 0x86, 0x03, 0x20, 0xfb, 0xe4, 0x0f, 0x41, 0xb8, 0xf2, 0x9a, - 0xbe, 0xeb, 0xa5, 0xdc, 0x56, 0xb8, 0xdb, 0x9f, 0x8d, 0x86, 0x8a, 0x8c, 0x4a, 0x30, 0xb3, 0x1d, - 0x97, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x16, 0x27, 0xfd, 0xa4, 0xd7, 0x79, 0xee, 0xf5, 0xfe, 0x68, - 0xa8, 0x6c, 0x1e, 0xe4, 0xcd, 0xb3, 0x1d, 0x16, 0x89, 0xc2, 0x06, 0x58, 0xea, 0xf3, 0x4b, 0x01, - 0x95, 0x17, 0xb8, 0xfe, 0xf6, 0x68, 0xa8, 0x2c, 0x8d, 0xef, 0x09, 0xa1, 0xe6, 0xe2, 0x71, 0x9b, - 0x1f, 0x35, 0x23, 0x14, 0xfc, 0x18, 0xac, 0x5c, 0xb8, 0x94, 0x3d, 0x26, 0xec, 0x4b, 0xd7, 0xbf, - 0xe4, 0xd5, 0xa7, 0xaa, 0x6f, 0x8a, 0x19, 0x5c, 0x79, 0x38, 0x31, 0xa1, 0x24, 0x0e, 0xfe, 0x1a, - 0x2c, 0x5f, 0x88, 0x83, 0x65, 0x54, 0x7a, 0x6e, 0x4d, 0x49, 0xb4, 0xd4, 0x21, 0x54, 0xaf, 0x09, - 0xf9, 0xe5, 0xa8, 0x99, 0xa2, 0x89, 0x1a, 0xfc, 0x01, 0x58, 0xe2, 0x1f, 0x27, 0x4d, 0xb9, 0xca, - 0x7b, 0x73, 0x5d, 0xc0, 0x97, 0x1e, 0x8e, 0x9b, 0x51, 0x64, 0x8f, 0xa0, 0x27, 0xad, 0x43, 0x79, - 0x39, 0x0f, 0x3d, 0x69, 0x1d, 0xa2, 0xc8, 0x0e, 0x9f, 0x81, 0x25, 0x4a, 0x1e, 0x59, 0x4e, 0x70, - 0x25, 0x03, 0xbe, 0xe4, 0x6e, 0x4f, 0xe9, 0x6e, 0xfb, 0x88, 0x23, 0x33, 0x47, 0xfa, 0x89, 0xba, - 0xb0, 0xa3, 0x48, 0x12, 0x9a, 0x60, 0xd9, 0x0f, 0x9c, 0x03, 0xfa, 0x94, 0x12, 0x5f, 0x5e, 0xc9, - 0x9d, 0x27, 0xb2, 0xfa, 0x28, 0xc2, 0x66, 
0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x22, 0x0c, 0x4d, - 0x00, 0xf8, 0x07, 0xbf, 0x39, 0xc8, 0x3b, 0x33, 0x4f, 0x9a, 0x28, 0x06, 0x67, 0xfd, 0xac, 0x87, - 0xcb, 0x73, 0x62, 0x46, 0x09, 0x5d, 0xf8, 0x67, 0x09, 0x40, 0x1a, 0x78, 0x9e, 0x4d, 0x7a, 0xc4, - 0x61, 0xd8, 0xe6, 0xad, 0x54, 0x5e, 0xe5, 0xee, 0x7e, 0x3a, 0x2d, 0x6a, 0x39, 0x52, 0xd6, 0x6d, - 0xbc, 0x37, 0xe7, 0xa1, 0xa8, 0xc0, 0x67, 0x38, 0x69, 0xe7, 0x62, 0xb4, 0x6b, 0x33, 0x27, 0xad, - 0xf8, 0x1e, 0x36, 0x99, 0x34, 0x61, 0x47, 0x91, 0x24, 0xfc, 0x1c, 0xec, 0x44, 0xb7, 0x54, 0xe4, - 0xba, 0xec, 0xd8, 0xb2, 0x09, 0x1d, 0x50, 0x46, 0x7a, 0xf2, 0x3a, 0x4f, 0xa6, 0xba, 0x60, 0xee, - 0xa0, 0x42, 0x14, 0x2a, 0x61, 0xc3, 0x1e, 0x50, 0xa2, 0x22, 0x14, 0xae, 0xd0, 0xb8, 0x0a, 0x1e, - 0x51, 0x03, 0xdb, 0xe3, 0xd3, 0xd7, 0x75, 0xee, 0xe0, 0xfd, 0xd1, 0x50, 0x51, 0x9a, 0xd3, 0xa1, - 0x68, 0x96, 0x16, 0xfc, 0x15, 0x90, 0x71, 0x99, 0x9f, 0x0d, 0xee, 0xe7, 0xff, 0xc2, 0xca, 0x56, - 0xea, 0xa0, 0x94, 0x0d, 0x3d, 0xb0, 0x81, 0xd3, 0xef, 0x05, 0x54, 0xae, 0xf1, 0xb5, 0xfe, 0xc1, - 0x94, 0x79, 0xc8, 0x3c, 0x31, 0xe8, 0xb2, 0x08, 0xe3, 0x46, 0xc6, 0x40, 0x51, 0x4e, 0x1d, 0x5e, - 0x01, 0x88, 0xb3, 0xcf, 0x1b, 0x54, 0x86, 0x33, 0x37, 0xb2, 0xdc, 0x9b, 0xc8, 0x24, 0xd5, 0x72, - 0x26, 0x8a, 0x0a, 0x7c, 0x40, 0x06, 0x6a, 0x38, 0xf3, 0x1c, 0x43, 0xe5, 0x1b, 0xdc, 0xf1, 0x0f, - 0x67, 0x3b, 0x8e, 0x39, 0xfa, 0xae, 0xf0, 0x5b, 0xcb, 0x5a, 0x28, 0xca, 0x3b, 0x80, 0x8f, 0xc0, - 0x96, 0x68, 0x7c, 0xea, 0x50, 0x7c, 0x4e, 0xda, 0x03, 0x6a, 0x30, 0x9b, 0xca, 0x9b, 0xbc, 0x76, - 0xcb, 0xa3, 0xa1, 0xb2, 0x75, 0x50, 0x60, 0x47, 0x85, 0x2c, 0xf8, 0x29, 0xd8, 0x38, 0x77, 0xfd, - 0x8e, 0x65, 0x9a, 0xc4, 0x89, 0x94, 0xb6, 0xb8, 0xd2, 0x56, 0x18, 0xff, 0xe3, 0x8c, 0x0d, 0xe5, - 0xd0, 0x90, 0x82, 0x6d, 0xa1, 0xdc, 0xf2, 0x5d, 0xe3, 0xd4, 0x0d, 0x1c, 0x16, 0x6e, 0x17, 0x54, - 0xde, 0x8e, 0xb7, 0xc8, 0xed, 0x83, 0x22, 0xc0, 0xcb, 0xa1, 0x72, 0xb3, 0x60, 0xbb, 0x4a, 0x81, - 0x50, 0xb1, 0x36, 0xb4, 0xc1, 0xaa, 0x78, 0x60, 0x3b, 0xb4, 0x31, 0xa5, 0xb2, 0xcc, 0x97, 0xfa, - 0xbd, 0xe9, 0x85, 0x2d, 0x86, 0x67, 0xd7, 0x3b, 0xbf, 0xf9, 0x25, 0x01, 0x28, 0xa5, 0xae, 0xfe, - 0x55, 0x02, 0xbb, 0xa5, 0x85, 0x11, 0xde, 0x4b, 0xbd, 0xda, 0xa8, 0x99, 0x57, 0x1b, 0x98, 0x27, - 0xbe, 0x81, 0x47, 0x9b, 0xaf, 0x24, 0x20, 0x97, 0xed, 0x10, 0xf0, 0xe3, 0x54, 0x07, 0xdf, 0xcb, - 0x74, 0xb0, 0x96, 0xe3, 0xbd, 0x81, 0xfe, 0x7d, 0x23, 0x81, 0x77, 0xa6, 0xcc, 0x40, 0x5c, 0x90, - 0x88, 0x99, 0x44, 0x3d, 0xc6, 0xe1, 0x52, 0x96, 0x78, 0x1e, 0x4d, 0x0a, 0x52, 0x01, 0x06, 0x95, - 0xb2, 0xe1, 0x53, 0x70, 0x43, 0x54, 0xc3, 0xac, 0x8d, 0x9f, 0xdc, 0x97, 0xf5, 0x77, 0x46, 0x43, - 0xe5, 0x46, 0xb3, 0x18, 0x82, 0xca, 0xb8, 0xea, 0xdf, 0x25, 0xb0, 0x53, 0xbc, 0xe5, 0xc3, 0x3b, - 0xa9, 0x70, 0x2b, 0x99, 0x70, 0x5f, 0xcf, 0xb0, 0x44, 0xb0, 0x7f, 0x07, 0xd6, 0xc5, 0xc1, 0x20, - 0xfd, 0x08, 0x99, 0x0a, 0x7a, 0xb8, 0x44, 0xc2, 0x33, 0xbd, 0x90, 0x88, 0xd2, 0x97, 0xdf, 0xf7, - 0xd3, 0x6d, 0x28, 0xa3, 0xa6, 0xfe, 0x43, 0x02, 0xef, 0xcd, 0xdc, 0x6c, 0xa1, 0x9e, 0xea, 0xba, - 0x96, 0xe9, 0x7a, 0xbd, 0x5c, 0xe0, 0xcd, 0xbc, 0x45, 0xea, 0xbf, 0x78, 0xfe, 0xa2, 0x3e, 0xf7, - 0xed, 0x8b, 0xfa, 0xdc, 0x77, 0x2f, 0xea, 0x73, 0x7f, 0x1c, 0xd5, 0xa5, 0xe7, 0xa3, 0xba, 0xf4, - 0xed, 0xa8, 0x2e, 0x7d, 0x37, 0xaa, 0x4b, 0xff, 0x19, 0xd5, 0xa5, 0xbf, 0xfc, 0xb7, 0x3e, 0xf7, - 0x9b, 0xdd, 0xd2, 0xff, 0x20, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0xec, 0x71, 0xd7, 0x62, 0xb8, - 0x18, 0x00, 0x00, + 0x15, 0x16, 0x4c, 0x5d, 0xa8, 0xd5, 0xc5, 0xe2, 0xea, 0x62, 0x48, 0x69, 0x08, 0x07, 0x99, 0xe9, + 0xb8, 0x69, 0x0a, 0xc6, 0xb2, 0xe3, 0x7a, 0x9a, 
0x5e, 0x2c, 0x88, 0x92, 0xad, 0x8c, 0x65, 0xb1, + 0x4b, 0x2b, 0xd3, 0x76, 0xdc, 0x4e, 0x97, 0xc0, 0x8a, 0x44, 0x04, 0x02, 0x28, 0x76, 0xc1, 0x88, + 0x6f, 0x79, 0xe8, 0x43, 0x1f, 0xfb, 0x07, 0x32, 0xfd, 0x01, 0x9d, 0x3e, 0xf5, 0x47, 0xd4, 0x99, + 0xe9, 0x74, 0xd2, 0xb7, 0x4c, 0x1f, 0x38, 0x35, 0xfb, 0x2f, 0xfc, 0xd4, 0xc1, 0x72, 0x01, 0x12, + 0x37, 0xd2, 0xce, 0x8c, 0xfd, 0x46, 0xec, 0xf9, 0xbe, 0xef, 0xec, 0x9e, 0xdd, 0x3d, 0x67, 0x77, + 0x09, 0xf4, 0xcb, 0xfb, 0x54, 0xb3, 0xdc, 0xda, 0x65, 0xd0, 0x22, 0xbe, 0x43, 0x18, 0xa1, 0xb5, + 0x1e, 0x71, 0x4c, 0xd7, 0xaf, 0x09, 0x03, 0xf6, 0xac, 0x9a, 0xe7, 0xda, 0x96, 0xd1, 0xaf, 0xf5, + 0x6e, 0xb7, 0x08, 0xc3, 0xb7, 0x6b, 0x6d, 0xe2, 0x10, 0x1f, 0x33, 0x62, 0x6a, 0x9e, 0xef, 0x32, + 0x17, 0xee, 0x8e, 0xa0, 0x1a, 0xf6, 0x2c, 0x6d, 0x04, 0xd5, 0x04, 0x74, 0xef, 0x47, 0x6d, 0x8b, + 0x75, 0x82, 0x96, 0x66, 0xb8, 0xdd, 0x5a, 0xdb, 0x6d, 0xbb, 0x35, 0xce, 0x68, 0x05, 0x17, 0xfc, + 0x8b, 0x7f, 0xf0, 0x5f, 0x23, 0xa5, 0x3d, 0x75, 0xc2, 0xa9, 0xe1, 0xfa, 0xa4, 0xd6, 0xcb, 0x78, + 0xdb, 0xbb, 0x3b, 0xc6, 0x74, 0xb1, 0xd1, 0xb1, 0x1c, 0xe2, 0xf7, 0x6b, 0xde, 0x65, 0x3b, 0x6c, + 0xa0, 0xb5, 0x2e, 0x61, 0x38, 0x8f, 0x55, 0x2b, 0x62, 0xf9, 0x81, 0xc3, 0xac, 0x2e, 0xc9, 0x10, + 0xee, 0xcd, 0x22, 0x50, 0xa3, 0x43, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0x98, 0x65, 0xd7, + 0x2c, 0x87, 0x51, 0xe6, 0xa7, 0x49, 0xea, 0x5d, 0xb0, 0x71, 0x60, 0xdb, 0xee, 0x17, 0xc4, 0x3c, + 0x6c, 0x9e, 0xd4, 0x7d, 0xab, 0x47, 0x7c, 0x78, 0x13, 0xcc, 0x3b, 0xb8, 0x4b, 0x64, 0xe9, 0xa6, + 0x74, 0x6b, 0x59, 0x5f, 0x7d, 0x3e, 0x50, 0xe6, 0x86, 0x03, 0x65, 0xfe, 0x09, 0xee, 0x12, 0xc4, + 0x2d, 0xea, 0x27, 0xa0, 0x22, 0x58, 0xc7, 0x36, 0xb9, 0xfa, 0xcc, 0xb5, 0x83, 0x2e, 0x81, 0xdf, + 0x07, 0x8b, 0x26, 0x17, 0x10, 0xc4, 0x75, 0x41, 0x5c, 0x1c, 0xc9, 0x22, 0x61, 0x55, 0x29, 0xb8, + 0x2e, 0xc8, 0x8f, 0x5c, 0xca, 0x1a, 0x98, 0x75, 0xe0, 0x3e, 0x00, 0x1e, 0x66, 0x9d, 0x86, 0x4f, + 0x2e, 0xac, 0x2b, 0x41, 0x87, 0x82, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0xec, + 0x13, 0x6c, 0x9e, 0x39, 0x76, 0x5f, 0xbe, 0x76, 0x53, 0xba, 0x55, 0xd6, 0x37, 0x04, 0xa3, 0x8c, + 0x44, 0x3b, 0x8a, 0x11, 0xea, 0x7f, 0x24, 0x50, 0x3e, 0xea, 0x59, 0x06, 0xb3, 0x5c, 0x07, 0xfe, + 0x1e, 0x94, 0xc3, 0xd9, 0x32, 0x31, 0xc3, 0xdc, 0xd9, 0xca, 0xfe, 0x47, 0xda, 0x78, 0x25, 0xc5, + 0xc1, 0xd3, 0xbc, 0xcb, 0x76, 0xd8, 0x40, 0xb5, 0x10, 0xad, 0xf5, 0x6e, 0x6b, 0x67, 0xad, 0xcf, + 0x89, 0xc1, 0x4e, 0x09, 0xc3, 0xe3, 0xee, 0x8d, 0xdb, 0x50, 0xac, 0x0a, 0x6d, 0xb0, 0x66, 0x12, + 0x9b, 0x30, 0x72, 0xe6, 0x85, 0x1e, 0x29, 0xef, 0xe1, 0xca, 0xfe, 0x9d, 0x57, 0x73, 0x53, 0x9f, + 0xa4, 0xea, 0x95, 0xe1, 0x40, 0x59, 0x4b, 0x34, 0xa1, 0xa4, 0xb8, 0xfa, 0x95, 0x04, 0x76, 0x8e, + 0x9b, 0x0f, 0x7d, 0x37, 0xf0, 0x9a, 0x2c, 0x9c, 0xdd, 0x76, 0x5f, 0x98, 0xe0, 0x8f, 0xc1, 0xbc, + 0x1f, 0xd8, 0xd1, 0x5c, 0xbe, 0x1f, 0xcd, 0x25, 0x0a, 0x6c, 0xf2, 0x72, 0xa0, 0x6c, 0xa6, 0x58, + 0x4f, 0xfb, 0x1e, 0x41, 0x9c, 0x00, 0x3f, 0x05, 0x8b, 0x3e, 0x76, 0xda, 0x24, 0xec, 0x7a, 0xe9, + 0xd6, 0xca, 0xbe, 0xaa, 0x15, 0xee, 0x35, 0xed, 0xa4, 0x8e, 0x42, 0xe8, 0x78, 0xc6, 0xf9, 0x27, + 0x45, 0x42, 0x41, 0x3d, 0x05, 0x6b, 0x7c, 0xaa, 0x5d, 0x9f, 0x71, 0x0b, 0x7c, 0x17, 0x94, 0xba, + 0x96, 0xc3, 0x3b, 0xb5, 0xa0, 0xaf, 0x08, 0x56, 0xe9, 0xd4, 0x72, 0x50, 0xd8, 0xce, 0xcd, 0xf8, + 0x8a, 0xc7, 0x6c, 0xd2, 0x8c, 0xaf, 0x50, 0xd8, 0xae, 0x3e, 0x04, 0x4b, 0xc2, 0xe3, 0xa4, 0x50, + 0x69, 0xba, 0x50, 0x29, 0x47, 0xe8, 0xaf, 0xd7, 0xc0, 0x66, 0xc3, 0x35, 0xeb, 0x16, 0xf5, 0x03, + 0x1e, 0x2f, 0x3d, 0x30, 0xdb, 0x84, 0xbd, 0x85, 0xf5, 0xf1, 0x14, 0xcc, 
0x53, 0x8f, 0x18, 0x62, + 0x59, 0xec, 0x4f, 0x89, 0x6d, 0x4e, 0xff, 0x9a, 0x1e, 0x31, 0xc6, 0xdb, 0x32, 0xfc, 0x42, 0x5c, + 0x0d, 0x3e, 0x03, 0x8b, 0x94, 0x61, 0x16, 0x50, 0xb9, 0xc4, 0x75, 0xef, 0xbe, 0xa6, 0x2e, 0xe7, + 0x8e, 0x67, 0x71, 0xf4, 0x8d, 0x84, 0xa6, 0xfa, 0x4f, 0x09, 0xdc, 0xc8, 0x61, 0x3d, 0xb6, 0x28, + 0x83, 0xcf, 0x32, 0x11, 0xd3, 0x5e, 0x2d, 0x62, 0x21, 0x9b, 0xc7, 0x2b, 0xde, 0xbc, 0x51, 0xcb, + 0x44, 0xb4, 0x9a, 0x60, 0xc1, 0x62, 0xa4, 0x1b, 0x2d, 0x45, 0xed, 0xf5, 0x86, 0xa5, 0xaf, 0x09, + 0xe9, 0x85, 0x93, 0x50, 0x04, 0x8d, 0xb4, 0xd4, 0x7f, 0x97, 0x72, 0x87, 0x13, 0x86, 0x13, 0x5e, + 0x80, 0xd5, 0xae, 0xe5, 0x1c, 0xf4, 0xb0, 0x65, 0xe3, 0x96, 0xd8, 0x3d, 0xd3, 0x16, 0x41, 0x98, + 0x61, 0xb5, 0x51, 0x86, 0xd5, 0x4e, 0x1c, 0x76, 0xe6, 0x37, 0x99, 0x6f, 0x39, 0x6d, 0x7d, 0x63, + 0x38, 0x50, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x16, 0x94, 0x29, 0xb1, 0x89, 0xc1, + 0x5c, 0xff, 0xf5, 0x32, 0xc4, 0x63, 0xdc, 0x22, 0x76, 0x53, 0x50, 0xf5, 0xd5, 0x30, 0x6e, 0xd1, + 0x17, 0x8a, 0x25, 0xa1, 0x0d, 0xd6, 0xbb, 0xf8, 0xea, 0xdc, 0xc1, 0xf1, 0x40, 0x4a, 0xdf, 0x71, + 0x20, 0x70, 0x38, 0x50, 0xd6, 0x4f, 0x13, 0x5a, 0x28, 0xa5, 0x0d, 0xbf, 0x94, 0xc0, 0x5e, 0xe0, + 0x74, 0x08, 0xb6, 0x59, 0xa7, 0xdf, 0x70, 0xcd, 0x28, 0xdd, 0x36, 0xf8, 0x0c, 0xc9, 0xf3, 0x3c, + 0x03, 0x3d, 0x18, 0x0e, 0x94, 0xbd, 0xf3, 0x42, 0xd4, 0xcb, 0x81, 0x52, 0x2d, 0xb6, 0xf2, 0xf4, + 0x34, 0xc5, 0x87, 0xfa, 0x8f, 0x05, 0xb0, 0x5b, 0xb8, 0xb0, 0xe1, 0xa7, 0x00, 0xba, 0x2d, 0x4a, + 0xfc, 0x1e, 0x31, 0x1f, 0x8e, 0xca, 0xa0, 0xe5, 0x46, 0xb9, 0x63, 0x4f, 0xac, 0x11, 0x78, 0x96, + 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0xa3, 0x04, 0xd6, 0xcc, 0x91, 0x1b, 0x62, 0x36, 0x5c, 0x33, 0x5a, + 0x9b, 0x0f, 0xbf, 0xcb, 0x96, 0xd3, 0xea, 0x93, 0x4a, 0x47, 0x0e, 0xf3, 0xfb, 0xfa, 0xb6, 0xe8, + 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xc3, 0x21, 0x99, 0xb1, 0x24, 0x15, 0x65, 0x95, 0xcf, 0xf2, + 0xc2, 0x78, 0x48, 0xf5, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0xcf, 0xc1, 0xba, 0x11, 0xf8, 0x3e, 0x71, + 0xd8, 0xa3, 0x51, 0x7c, 0xf9, 0x94, 0x2d, 0xe8, 0x3b, 0x42, 0x67, 0xfd, 0x30, 0x61, 0x45, 0x29, + 0x74, 0xc8, 0x37, 0x09, 0xb5, 0x7c, 0x62, 0x46, 0xfc, 0x85, 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, + 0x1a, 0xde, 0x07, 0xab, 0xe4, 0xca, 0x23, 0x46, 0x14, 0xd0, 0x45, 0xce, 0xde, 0x12, 0xec, 0xd5, + 0xa3, 0x09, 0x1b, 0x4a, 0x20, 0xa1, 0x01, 0x80, 0xe1, 0x3a, 0xa6, 0x35, 0x2a, 0xb5, 0x4b, 0x7c, + 0x22, 0x6a, 0xaf, 0xb6, 0x91, 0x0e, 0x23, 0xde, 0x38, 0x61, 0xc7, 0x4d, 0x14, 0x4d, 0xc8, 0xee, + 0xd9, 0x00, 0x66, 0xa7, 0x09, 0x6e, 0x80, 0xd2, 0x25, 0xe9, 0x8f, 0xca, 0x2b, 0x0a, 0x7f, 0xc2, + 0x07, 0x60, 0xa1, 0x87, 0xed, 0x80, 0x88, 0x0d, 0xfd, 0xc1, 0xab, 0xf5, 0xe3, 0xa9, 0xd5, 0x25, + 0x68, 0x44, 0xfc, 0xc9, 0xb5, 0xfb, 0x92, 0xfa, 0xb5, 0x04, 0x2a, 0x0d, 0xd7, 0x6c, 0x12, 0x23, + 0xf0, 0x2d, 0xd6, 0x1f, 0xad, 0xef, 0xb7, 0x50, 0x98, 0x50, 0xa2, 0x30, 0x7d, 0x34, 0x7d, 0x35, + 0x27, 0x7b, 0x57, 0x54, 0x96, 0xd4, 0xe7, 0x12, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0xd9, 0xf8, 0x65, + 0xb2, 0x6c, 0x7c, 0xf8, 0x3a, 0x83, 0x29, 0x28, 0x1a, 0x5f, 0x57, 0x72, 0x86, 0xc2, 0x4b, 0x46, + 0x78, 0x84, 0xf5, 0xad, 0x9e, 0x65, 0x93, 0x36, 0x31, 0xf9, 0x60, 0xca, 0x13, 0x47, 0xd8, 0xd8, + 0x82, 0x26, 0x50, 0x90, 0x82, 0x1d, 0x93, 0x5c, 0xe0, 0xc0, 0x66, 0x07, 0xa6, 0x79, 0x88, 0x3d, + 0xdc, 0xb2, 0x6c, 0x8b, 0x59, 0xe2, 0xcc, 0xb5, 0xac, 0x7f, 0x32, 0x1c, 0x28, 0x3b, 0xf5, 0x5c, + 0xc4, 0xcb, 0x81, 0xf2, 0x6e, 0xf6, 0xca, 0xa2, 0xc5, 0x90, 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0x20, + 0xfb, 0xe4, 0x0f, 0x41, 0xb8, 0xf3, 0xea, 0xbe, 0xeb, 0x25, 0xdc, 0x96, 0xb8, 0xdb, 0x9f, 0x0d, + 
0x07, 0x8a, 0x8c, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0xcf, 0xc1, 0x26, 0x16, 0x97, 0x8d, + 0x49, 0xaf, 0xf3, 0xdc, 0xeb, 0xfd, 0xe1, 0x40, 0xd9, 0x3c, 0xc8, 0x9a, 0x67, 0x3b, 0xcc, 0x13, + 0x85, 0x35, 0xb0, 0xd4, 0xe3, 0xf7, 0x12, 0x2a, 0x2f, 0x70, 0xfd, 0xed, 0xe1, 0x40, 0x59, 0x1a, + 0x5d, 0x55, 0x42, 0xcd, 0xc5, 0xe3, 0x26, 0x2f, 0x27, 0x11, 0x0a, 0x7e, 0x0c, 0x56, 0x3a, 0x2e, + 0x65, 0x4f, 0x08, 0xfb, 0xc2, 0xf5, 0x2f, 0x79, 0xf6, 0x29, 0xeb, 0x9b, 0x62, 0x06, 0x57, 0x1e, + 0x8d, 0x4d, 0x68, 0x12, 0x07, 0x7f, 0x0d, 0x96, 0x3b, 0xe2, 0x6c, 0x1b, 0xa5, 0x9e, 0x5b, 0x53, + 0x16, 0x5a, 0xe2, 0x1c, 0xac, 0x57, 0x84, 0xfc, 0x72, 0xd4, 0x4c, 0xd1, 0x58, 0x0d, 0xfe, 0x00, + 0x2c, 0xf1, 0x8f, 0x93, 0xba, 0x5c, 0xe6, 0xbd, 0xb9, 0x2e, 0xe0, 0x4b, 0x8f, 0x46, 0xcd, 0x28, + 0xb2, 0x47, 0xd0, 0x93, 0xc6, 0xa1, 0xbc, 0x9c, 0x85, 0x9e, 0x34, 0x0e, 0x51, 0x64, 0x87, 0xcf, + 0xc0, 0x12, 0x25, 0x8f, 0x2d, 0x27, 0xb8, 0x92, 0x01, 0xdf, 0x72, 0xb7, 0xa7, 0x74, 0xb7, 0x79, + 0xc4, 0x91, 0xa9, 0x5b, 0xc5, 0x58, 0x5d, 0xd8, 0x51, 0x24, 0x09, 0x4d, 0xb0, 0xec, 0x07, 0xce, + 0x01, 0x3d, 0xa7, 0xc4, 0x97, 0x57, 0x32, 0x47, 0x9a, 0xb4, 0x3e, 0x8a, 0xb0, 0x69, 0x0f, 0x71, + 0x64, 0x62, 0x04, 0x1a, 0x0b, 0x43, 0x13, 0x00, 0xfe, 0xc1, 0x2f, 0x2f, 0xf2, 0xce, 0xcc, 0xc3, + 0x2e, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf6, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, 0x93, 0x04, + 0x20, 0x0d, 0x3c, 0xcf, 0x26, 0x5d, 0xe2, 0x30, 0x6c, 0xf3, 0x56, 0x2a, 0xaf, 0x72, 0x77, 0x3f, + 0x9d, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0xae, 0xcd, 0x59, 0x28, 0xca, 0xf1, 0x19, 0x4e, 0xda, + 0x85, 0x18, 0xed, 0xda, 0xcc, 0x49, 0xcb, 0xbf, 0x0a, 0x8e, 0x27, 0x4d, 0xd8, 0x51, 0x24, 0x09, + 0x3f, 0x03, 0x3b, 0xd1, 0x45, 0x19, 0xb9, 0x2e, 0x3b, 0xb6, 0x6c, 0x42, 0xfb, 0x94, 0x91, 0xae, + 0xbc, 0xce, 0x17, 0x53, 0x55, 0x30, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x28, 0x51, + 0x12, 0x0a, 0x77, 0x68, 0x9c, 0x05, 0x8f, 0xa8, 0x81, 0xed, 0xd1, 0xe9, 0xeb, 0x3a, 0x77, 0xf0, + 0xfe, 0x70, 0xa0, 0x28, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x0a, 0xc8, 0xb8, 0xc8, 0xcf, + 0x06, 0xf7, 0xf3, 0xbd, 0x30, 0xb3, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x0f, 0x6c, 0xe0, 0xe4, 0x93, + 0x05, 0x95, 0x2b, 0x7c, 0xaf, 0x7f, 0x30, 0x65, 0x1e, 0x52, 0xaf, 0x1c, 0xba, 0x2c, 0xc2, 0xb8, + 0x91, 0x32, 0x50, 0x94, 0x51, 0x87, 0x57, 0x00, 0xe2, 0xf4, 0x0b, 0x0b, 0x95, 0xe1, 0xcc, 0x42, + 0x96, 0x79, 0x96, 0x19, 0x2f, 0xb5, 0x8c, 0x89, 0xa2, 0x1c, 0x1f, 0x90, 0x81, 0x0a, 0x4e, 0xbd, + 0x08, 0x51, 0xf9, 0x06, 0x77, 0xfc, 0xc3, 0xd9, 0x8e, 0x63, 0x8e, 0xbe, 0x2b, 0xfc, 0x56, 0xd2, + 0x16, 0x8a, 0xb2, 0x0e, 0xe0, 0x63, 0xb0, 0x25, 0x1a, 0xcf, 0x1d, 0x8a, 0x2f, 0x48, 0xb3, 0x4f, + 0x0d, 0x66, 0x53, 0x79, 0x93, 0xe7, 0x6e, 0x79, 0x38, 0x50, 0xb6, 0x0e, 0x72, 0xec, 0x28, 0x97, + 0x05, 0x1f, 0x80, 0x8d, 0x0b, 0xd7, 0x6f, 0x59, 0xa6, 0x49, 0x9c, 0x48, 0x69, 0x8b, 0x2b, 0x6d, + 0x85, 0xf1, 0x3f, 0x4e, 0xd9, 0x50, 0x06, 0x0d, 0x29, 0xd8, 0x16, 0xca, 0x0d, 0xdf, 0x35, 0x4e, + 0xdd, 0xc0, 0x61, 0x61, 0xb9, 0xa0, 0xf2, 0x76, 0x5c, 0x22, 0xb7, 0x0f, 0xf2, 0x00, 0x2f, 0x07, + 0xca, 0xcd, 0x9c, 0x72, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0x68, 0x83, 0x55, 0xf1, 0xc6, 0x77, 0x68, + 0x63, 0x4a, 0x65, 0x99, 0x6f, 0xf5, 0x7b, 0xd3, 0x13, 0x5b, 0x0c, 0x4f, 0xef, 0x77, 0x7e, 0xf9, + 0x9c, 0x04, 0xa0, 0x84, 0xba, 0xfa, 0x17, 0x09, 0xec, 0x16, 0x26, 0x46, 0x78, 0x2f, 0xf1, 0x70, + 0xa4, 0xa6, 0x1e, 0x8e, 0x60, 0x96, 0xf8, 0x06, 0xde, 0x8d, 0xbe, 0x92, 0x80, 0x5c, 0x54, 0x21, + 0xe0, 0xc7, 0x89, 0x0e, 0xbe, 0x97, 0xea, 0x60, 0x25, 0xc3, 0x7b, 0x03, 0xfd, 0xfb, 0x97, 0x04, + 0xde, 0x99, 0x32, 0x03, 
0x71, 0x42, 0x22, 0xe6, 0x24, 0xea, 0x09, 0x0e, 0xb7, 0xb2, 0xc4, 0xd7, + 0xd1, 0x38, 0x21, 0xe5, 0x60, 0x50, 0x21, 0x1b, 0x9e, 0x83, 0x1b, 0x22, 0x1b, 0xa6, 0x6d, 0xfc, + 0xe4, 0xbe, 0xac, 0xbf, 0x33, 0x1c, 0x28, 0x37, 0xea, 0xf9, 0x10, 0x54, 0xc4, 0x55, 0xff, 0x26, + 0x81, 0x9d, 0xfc, 0x92, 0x0f, 0xef, 0x24, 0xc2, 0xad, 0xa4, 0xc2, 0x7d, 0x3d, 0xc5, 0x12, 0xc1, + 0xfe, 0x1d, 0x58, 0x17, 0x07, 0x83, 0xe4, 0x3b, 0x68, 0x22, 0xe8, 0xe1, 0x16, 0x09, 0xcf, 0xf4, + 0x42, 0x22, 0x5a, 0xbe, 0xfc, 0xc9, 0x21, 0xd9, 0x86, 0x52, 0x6a, 0xea, 0xdf, 0x25, 0xf0, 0xde, + 0xcc, 0x62, 0x0b, 0xf5, 0x44, 0xd7, 0xb5, 0x54, 0xd7, 0xab, 0xc5, 0x02, 0x6f, 0xe6, 0x39, 0x54, + 0xff, 0xc5, 0xf3, 0x17, 0xd5, 0xb9, 0x6f, 0x5e, 0x54, 0xe7, 0xbe, 0x7d, 0x51, 0x9d, 0xfb, 0x72, + 0x58, 0x95, 0x9e, 0x0f, 0xab, 0xd2, 0x37, 0xc3, 0xaa, 0xf4, 0xed, 0xb0, 0x2a, 0xfd, 0x77, 0x58, + 0x95, 0xfe, 0xfc, 0xbf, 0xea, 0xdc, 0x6f, 0x76, 0x0b, 0xff, 0x06, 0xf9, 0x7f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xb4, 0x84, 0x53, 0xfb, 0x3b, 0x19, 0x00, 0x00, } func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { @@ -1089,6 +1091,13 @@ func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.UnhealthyPodEvictionPolicy != nil { + i -= len(*m.UnhealthyPodEvictionPolicy) + copy(dAtA[i:], *m.UnhealthyPodEvictionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UnhealthyPodEvictionPolicy))) + i-- + dAtA[i] = 0x22 + } if m.MaxUnavailable != nil { { size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) @@ -1937,6 +1946,10 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2308,6 +2321,7 @@ func (this *PodDisruptionBudgetSpec) String() string { `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, `}`, }, "") return s @@ -3581,6 +3595,39 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 09dbdde98..16301c236 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ 
b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -153,6 +153,34 @@ message PodDisruptionBudgetSpec { // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + optional string unhealthyPodEvictionPolicy = 4; } // PodDisruptionBudgetStatus represents information about the status of a @@ -329,7 +357,6 @@ message PodSecurityPolicySpec { // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 486f93461..1e6b075e3 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -45,8 +45,56 @@ type PodDisruptionBudgetSpec struct { // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. 
+ // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } +// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods +// should be considered for eviction. +// +enum +type UnhealthyPodEvictionPolicyType string + +const ( + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget" + + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow" +) + // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { @@ -278,7 +326,6 @@ type PodSecurityPolicySpec struct { AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index ef81d43af..266a9a853 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
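The UnhealthyPodEvictionPolicy field and the two UnhealthyPodEvictionPolicyType constants introduced above are easiest to read in use. A minimal sketch against the vendored policy/v1beta1 types; the client-go wiring, the budget value, and the variable names are assumptions for illustration, not part of this patch:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Opt into the new beta behavior: running pods that are not yet Ready may be
	// evicted even when the disruption budget is not currently satisfied.
	policy := policyv1beta1.AlwaysAllow
	minAvailable := intstr.FromString("90%") // illustrative value

	spec := policyv1beta1.PodDisruptionBudgetSpec{
		MinAvailable:               &minAvailable,
		UnhealthyPodEvictionPolicy: &policy,
	}

	// On the wire the new field is number 4 with wire type 2 (length-delimited),
	// so its key byte is (4<<3)|2 == 0x22 -- the constant written by the generated
	// MarshalToSizedBuffer and checked in the "case 4" branch of Unmarshal above.
	fmt.Printf("%+v key=0x%x\n", spec, byte(4<<3|2))
}

Leaving the field nil keeps the default IfHealthyBudget behavior described in the doc comment above.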
var map_AllowedCSIDriver = map[string]string{ @@ -117,10 +117,11 @@ func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { } var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", - "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { @@ -183,7 +184,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate.", + "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 0a6239b87..8602d1adc 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -239,6 +239,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { *out = new(intstr.IntOrString) **out = **in } + if in.UnhealthyPodEvictionPolicy != nil { + in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy + *out = new(UnhealthyPodEvictionPolicyType) + **out = **in + } return } diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 1cb19d5da..13ff60ea7 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -66,6 +66,7 @@ message ClusterRoleBinding { // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. 
optional RoleRef roleRef = 3; } @@ -96,7 +97,7 @@ message PolicyRule { repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional repeated string apiGroups = 2; @@ -140,6 +141,7 @@ message RoleBinding { // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. optional RoleRef roleRef = 3; } diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 067b6f15e..ce845d69b 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -51,7 +51,7 @@ type PolicyRule struct { Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. @@ -132,6 +132,7 @@ type RoleBinding struct { // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } @@ -209,6 +210,7 @@ type ClusterRoleBinding struct { // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 228ee54c0..0471a5594 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -51,7 +51,7 @@ var map_ClusterRoleBinding = map[string]string{ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (ClusterRoleBinding) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", @@ -105,7 +105,7 @@ var map_RoleBinding = map[string]string{ "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (RoleBinding) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index bc0da93ce..d5ceaa0e8 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -100,7 +100,7 @@ message PolicyRule { repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. 
// +optional repeated string apiGroups = 3; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 13a0a1f0a..e0e75b150 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -51,7 +51,7 @@ type PolicyRule struct { Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 46b8b9ee6..6708f3e58 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index c0988d1b6..f6b2f0dde 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -100,7 +100,7 @@ message PolicyRule { repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional repeated string apiGroups = 2; diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 96e6b18f5..4941cd2ab 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -51,7 +51,7 @@ type PolicyRule struct { Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 5d57cb348..fff1fe40f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. 
'*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", diff --git a/vendor/k8s.io/api/resource/v1alpha2/doc.go b/vendor/k8s.io/api/resource/v1alpha2/doc.go new file mode 100644 index 000000000..d9c20e089 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package + +// +groupName=resource.k8s.io + +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go new file mode 100644 index 000000000..2e8f9c724 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go @@ -0,0 +1,4817 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto + +package v1alpha2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
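Stepping back to the rbac hunks above before the new resource/v1alpha2 file continues: the repeated apiGroups clarification ("" selects the core API group, "*" selects every group) is concrete in a rule definition. A short sketch using the vendored k8s.io/api/rbac/v1 types; the verbs and resources are illustrative assumptions:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// "" restricts the rule to the core API group (pods, secrets, services, ...).
	readCorePods := rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{""},
		Resources: []string{"pods"},
	}

	// "*" makes the verbs apply to the named resources in any API group.
	readAnyDeployments := rbacv1.PolicyRule{
		Verbs:     []string{"get"},
		APIGroups: []string{"*"},
		Resources: []string{"deployments"},
	}

	fmt.Printf("%+v\n%+v\n", readCorePods, readAnyDeployments)
}

The same hunks also document that RoleRef on RoleBinding and ClusterRoleBinding is immutable: repointing a binding means deleting and recreating it rather than updating roleRef in place.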
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{0} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{1} +} +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) +} +func (m *PodSchedulingContext) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo + +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{2} +} +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) +} +func (m *PodSchedulingContextList) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo + +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{3} +} +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) +} +func (m *PodSchedulingContextSpec) XXX_Size() 
int { + return m.Size() +} +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo + +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{4} +} +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) +} +func (m *PodSchedulingContextStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{5} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{6} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{7} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return 
nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } +func (*ResourceClaimParametersReference) ProtoMessage() {} +func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{8} +} +func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimParametersReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimParametersReference.Merge(m, src) +} +func (m *ResourceClaimParametersReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimParametersReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimParametersReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo + +func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } +func (*ResourceClaimSchedulingStatus) ProtoMessage() {} +func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{9} +} +func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) +} +func (m *ResourceClaimSchedulingStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{10} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_3add37bbd52889e0, []int{11} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{12} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{13} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{14} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourceClass) Reset() { *m = ResourceClass{} } +func (*ResourceClass) ProtoMessage() {} +func (*ResourceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{15} +} +func (m *ResourceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClass.Merge(m, src) +} +func (m *ResourceClass) XXX_Size() int { + return m.Size() +} +func (m *ResourceClass) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClass proto.InternalMessageInfo + +func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } +func (*ResourceClassList) ProtoMessage() {} +func (*ResourceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{16} +} +func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClassList.Merge(m, src) +} +func (m *ResourceClassList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo + +func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } +func (*ResourceClassParametersReference) ProtoMessage() {} +func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{17} +} +func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClassParametersReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClassParametersReference.Merge(m, src) +} +func (m *ResourceClassParametersReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClassParametersReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo + +func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } +func (*ResourceHandle) ProtoMessage() {} +func (*ResourceHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{18} +} +func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandle.Merge(m, src) +} +func (m *ResourceHandle) XXX_Size() int { + return m.Size() +} +func (m *ResourceHandle) XXX_DiscardUnknown() { 
+ xxx_messageInfo_ResourceHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") + proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") + proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") + proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") + proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) +} + +var fileDescriptor_3add37bbd52889e0 = []byte{ + // 1233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x6e, 0x95, 0x4c, 0x1a, 0x37, 0xd9, 0xb6, 0xe0, 0x46, 0xad, 0x63, 0xf6, 0x14, + 0x89, 0xb2, 0xdb, 0x06, 0x54, 0x2a, 0xfe, 0x49, 0xd9, 0x06, 0x4a, 0x04, 0x4d, 0xc3, 0x98, 0x8a, + 0x16, 0x21, 0xd4, 0xc9, 0xee, 0xab, 0xbd, 0x64, 0xff, 0xb1, 0x33, 0x6b, 0xa8, 0xb8, 0xf4, 0x23, + 0xf4, 0xc0, 0x01, 0x4e, 0x1c, 0xf9, 0x02, 0x7c, 0x03, 0x84, 0xd4, 0x63, 0x11, 0x1c, 0x7a, 0xb2, + 0xa8, 0xf9, 0x08, 0x9c, 0xe8, 0x09, 0xcd, 0x78, 0x77, 0xbd, 0xb3, 0xf6, 0x9a, 0x38, 0x07, 0x0b, + 0x4e, 0xc9, 0xcc, 0xfb, 0xbd, 0xdf, 0xfb, 0x37, 0xef, 0xcd, 0xac, 0xd1, 0xbb, 0x87, 0xd7, 0xa8, + 0xee, 0x04, 0xc6, 0x61, 0x7c, 0x00, 0x91, 0x0f, 0x0c, 0xa8, 0xd1, 0x03, 0xdf, 0x0e, 0x22, 0x23, + 0x11, 0x90, 0xd0, 0x31, 0x22, 0xa0, 0x41, 0x1c, 0x59, 0x60, 0xf4, 0xae, 0x10, 0x37, 0xec, 0x92, + 0x2d, 0xa3, 0x03, 0x3e, 0x44, 0x84, 0x81, 0xad, 0x87, 0x51, 0xc0, 0x02, 0xf5, 0xc2, 0x10, 0xad, + 0x93, 0xd0, 0xd1, 0x53, 0xb4, 0x9e, 0xa2, 0xd7, 0x5f, 0xe9, 0x38, 0xac, 0x1b, 0x1f, 0xe8, 0x56, + 0xe0, 0x19, 0x9d, 0xa0, 0x13, 0x18, 0x42, 0xe9, 
0x20, 0xbe, 0x2f, 0x56, 0x62, 0x21, 0xfe, 0x1b, + 0x92, 0xad, 0x6b, 0x39, 0xd3, 0x56, 0x10, 0x71, 0xb3, 0x45, 0x83, 0xeb, 0xaf, 0x8d, 0x30, 0x1e, + 0xb1, 0xba, 0x8e, 0x0f, 0xd1, 0x03, 0x23, 0x3c, 0xec, 0xf0, 0x0d, 0x6a, 0x78, 0xc0, 0xc8, 0x24, + 0x2d, 0xa3, 0x4c, 0x2b, 0x8a, 0x7d, 0xe6, 0x78, 0x30, 0xa6, 0x70, 0xf5, 0xdf, 0x14, 0xa8, 0xd5, + 0x05, 0x8f, 0x14, 0xf5, 0xb4, 0xef, 0x2a, 0x68, 0x75, 0xdb, 0x75, 0x03, 0x8b, 0x30, 0x27, 0xf0, + 0x31, 0xd0, 0xd8, 0x65, 0x6a, 0x80, 0x4e, 0xa7, 0xb9, 0x79, 0x9f, 0xf8, 0xb6, 0x0b, 0xb4, 0xa1, + 0xb4, 0xaa, 0x9b, 0xcb, 0x5b, 0x97, 0xf4, 0x69, 0xe9, 0xd3, 0xb1, 0xa4, 0x64, 0xbe, 0xf8, 0xb8, + 0xbf, 0xb1, 0x30, 0xe8, 0x6f, 0x9c, 0x96, 0xf7, 0x29, 0x2e, 0xb2, 0xab, 0x07, 0x68, 0x95, 0xf4, + 0x88, 0xe3, 0x92, 0x03, 0x17, 0x6e, 0xf9, 0x7b, 0x81, 0x0d, 0xb4, 0x51, 0x69, 0x29, 0x9b, 0xcb, + 0x5b, 0xad, 0xbc, 0x45, 0x9e, 0x63, 0xbd, 0x77, 0x45, 0xe7, 0x80, 0x36, 0xb8, 0x60, 0xb1, 0x20, + 0x32, 0xcf, 0x0e, 0xfa, 0x1b, 0xab, 0xdb, 0x05, 0x6d, 0x3c, 0xc6, 0xa7, 0x1a, 0x68, 0x89, 0x76, + 0x49, 0x04, 0x7c, 0xaf, 0x51, 0x6d, 0x29, 0x9b, 0x8b, 0xe6, 0x5a, 0xe2, 0xe0, 0x52, 0x3b, 0x15, + 0xe0, 0x11, 0x46, 0xfb, 0xa9, 0x82, 0xce, 0xee, 0x07, 0x76, 0xdb, 0xea, 0x82, 0x1d, 0xbb, 0x8e, + 0xdf, 0xb9, 0x1e, 0xf8, 0x0c, 0xbe, 0x66, 0xea, 0x3d, 0xb4, 0xc8, 0xeb, 0x66, 0x13, 0x46, 0x1a, + 0x8a, 0xf0, 0xf2, 0x72, 0xce, 0xcb, 0x2c, 0xfd, 0x7a, 0x78, 0xd8, 0xe1, 0x1b, 0x54, 0xe7, 0x68, + 0xee, 0xf7, 0xad, 0x83, 0x2f, 0xc0, 0x62, 0x37, 0x81, 0x11, 0x53, 0x4d, 0x4c, 0xa3, 0xd1, 0x1e, + 0xce, 0x58, 0xd5, 0x3b, 0xa8, 0x46, 0x43, 0xb0, 0x92, 0x1c, 0x5c, 0x9d, 0x9e, 0xf5, 0x49, 0x3e, + 0xb6, 0x43, 0xb0, 0xcc, 0x53, 0x89, 0x8d, 0x1a, 0x5f, 0x61, 0xc1, 0xa8, 0xde, 0x43, 0x27, 0x29, + 0x23, 0x2c, 0xa6, 0x22, 0x05, 0xcb, 0x5b, 0xd7, 0x8e, 0xc1, 0x2d, 0xf4, 0xcd, 0x7a, 0xc2, 0x7e, + 0x72, 0xb8, 0xc6, 0x09, 0xaf, 0xf6, 0xab, 0x82, 0x1a, 0x93, 0xd4, 0x3e, 0x74, 0x28, 0x53, 0x3f, + 0x1b, 0x4b, 0x9d, 0x7e, 0xb4, 0xd4, 0x71, 0x6d, 0x91, 0xb8, 0xd5, 0xc4, 0xec, 0x62, 0xba, 0x93, + 0x4b, 0xdb, 0x27, 0xe8, 0x84, 0xc3, 0xc0, 0xe3, 0x67, 0x87, 0x9f, 0xd6, 0xad, 0xd9, 0x63, 0x33, + 0x57, 0x12, 0xfa, 0x13, 0xbb, 0x9c, 0x08, 0x0f, 0xf9, 0xb4, 0x47, 0x25, 0x31, 0xf1, 0xc4, 0xaa, + 0xd7, 0xd0, 0x29, 0x2a, 0x0e, 0x23, 0xd8, 0xfc, 0xa4, 0x89, 0xb8, 0x96, 0xcc, 0xb3, 0x09, 0xd1, + 0xa9, 0x76, 0x4e, 0x86, 0x25, 0xa4, 0xfa, 0x06, 0xaa, 0x87, 0x01, 0x03, 0x9f, 0x39, 0xc4, 0x4d, + 0x0f, 0x7d, 0x75, 0x73, 0xc9, 0x54, 0x07, 0xfd, 0x8d, 0xfa, 0xbe, 0x24, 0xc1, 0x05, 0xa4, 0xf6, + 0xbd, 0x82, 0xd6, 0xcb, 0xab, 0xa3, 0x7e, 0x83, 0xea, 0x69, 0xc4, 0xd7, 0x5d, 0xe2, 0x78, 0x69, + 0x07, 0xbf, 0x79, 0xb4, 0x0e, 0x16, 0x3a, 0x23, 0xee, 0xa4, 0xe4, 0x2f, 0x24, 0x31, 0xd5, 0x25, + 0x18, 0xc5, 0x05, 0x53, 0xda, 0x0f, 0x15, 0xb4, 0x22, 0x41, 0xe6, 0xd0, 0x32, 0x1f, 0x49, 0x2d, + 0x63, 0xcc, 0x12, 0x66, 0x59, 0xaf, 0xdc, 0x2d, 0xf4, 0xca, 0x95, 0x59, 0x48, 0xa7, 0x37, 0xc9, + 0x40, 0x41, 0x4d, 0x09, 0x7f, 0x3d, 0xf0, 0x69, 0xec, 0x41, 0x84, 0xe1, 0x3e, 0x44, 0xe0, 0x5b, + 0xa0, 0x5e, 0x42, 0x8b, 0x24, 0x74, 0x6e, 0x44, 0x41, 0x1c, 0x26, 0x47, 0x2a, 0x3b, 0xfa, 0xdb, + 0xfb, 0xbb, 0x62, 0x1f, 0x67, 0x08, 0x8e, 0x4e, 0x3d, 0x12, 0xde, 0xe6, 0xd0, 0xa9, 0x1d, 0x9c, + 0x21, 0xd4, 0x16, 0xaa, 0xf9, 0xc4, 0x83, 0x46, 0x4d, 0x20, 0xb3, 0xd8, 0xf7, 0x88, 0x07, 0x58, + 0x48, 0x54, 0x13, 0x55, 0x63, 0xc7, 0x6e, 0x9c, 0x10, 0x80, 0xcb, 0x09, 0xa0, 0x7a, 0x7b, 0x77, + 0xe7, 0x79, 0x7f, 0xe3, 0xa5, 0xb2, 0xbb, 0x86, 0x3d, 0x08, 0x81, 0xea, 0xb7, 0x77, 0x77, 0x30, + 0x57, 0xd6, 0x7e, 0x56, 0xd0, 0x9a, 0x14, 0xe4, 0x1c, 0x46, 0xc0, 0xbe, 
0x3c, 0x02, 0x5e, 0x9e, + 0xa1, 0x64, 0x25, 0xbd, 0xff, 0xad, 0x82, 0x5a, 0x12, 0x6e, 0x9f, 0x44, 0xc4, 0x03, 0x06, 0x11, + 0x3d, 0x6e, 0xb1, 0x5a, 0xa8, 0x76, 0xe8, 0xf8, 0xb6, 0x38, 0xab, 0xb9, 0xf4, 0x7f, 0xe0, 0xf8, + 0x36, 0x16, 0x92, 0xac, 0x40, 0xd5, 0xb2, 0x02, 0x69, 0x0f, 0x15, 0x74, 0x71, 0x6a, 0xb7, 0x66, + 0x1c, 0x4a, 0x69, 0x91, 0xdf, 0x46, 0xa7, 0x63, 0x9f, 0xc6, 0x0e, 0xe3, 0xf7, 0x5d, 0x7e, 0x00, + 0x9d, 0xe1, 0xb7, 0xf6, 0x6d, 0x59, 0x84, 0x8b, 0x58, 0xed, 0xc7, 0x4a, 0xa1, 0xbe, 0x62, 0x1c, + 0xde, 0x40, 0x6b, 0xb9, 0x71, 0x40, 0xe9, 0xde, 0xc8, 0x87, 0xf3, 0x89, 0x0f, 0x79, 0xad, 0x21, + 0x00, 0x8f, 0xeb, 0xa8, 0x5f, 0xa1, 0x95, 0x30, 0x9f, 0xea, 0xa4, 0xb5, 0xdf, 0x99, 0xa1, 0xa4, + 0x13, 0x4a, 0x65, 0xae, 0x0d, 0xfa, 0x1b, 0x2b, 0x92, 0x00, 0xcb, 0x76, 0xd4, 0x7d, 0x54, 0x27, + 0xd9, 0x93, 0xe8, 0x26, 0x1f, 0xe9, 0xc3, 0x32, 0x6c, 0xa6, 0xe3, 0x6f, 0x5b, 0x92, 0x3e, 0x1f, + 0xdb, 0xc1, 0x05, 0x7d, 0xed, 0xaf, 0x0a, 0x3a, 0x33, 0x61, 0x3c, 0xa8, 0x5b, 0x08, 0xd9, 0x91, + 0xd3, 0x83, 0x28, 0x97, 0xa4, 0x6c, 0xcc, 0xed, 0x64, 0x12, 0x9c, 0x43, 0xa9, 0x9f, 0x23, 0x34, + 0x62, 0x4f, 0x72, 0xa2, 0x4f, 0xcf, 0x49, 0xf1, 0x81, 0x67, 0xd6, 0x39, 0x7f, 0x6e, 0x37, 0xc7, + 0xa8, 0x52, 0xb4, 0x1c, 0x01, 0x85, 0xa8, 0x07, 0xf6, 0x7b, 0x41, 0xd4, 0xa8, 0x8a, 0x3e, 0x7a, + 0x6b, 0x86, 0xa4, 0x8f, 0x8d, 0x32, 0xf3, 0x4c, 0x12, 0xd2, 0x32, 0x1e, 0x11, 0xe3, 0xbc, 0x15, + 0xb5, 0x8d, 0xce, 0xd9, 0x40, 0x72, 0x6e, 0x7e, 0x19, 0x03, 0x65, 0x60, 0x8b, 0x09, 0xb5, 0x68, + 0x5e, 0x4c, 0x08, 0xce, 0xed, 0x4c, 0x02, 0xe1, 0xc9, 0xba, 0xda, 0xef, 0x0a, 0x3a, 0x27, 0x79, + 0xf6, 0x31, 0x78, 0xa1, 0x4b, 0x18, 0xcc, 0xe1, 0x3a, 0xba, 0x2b, 0x5d, 0x47, 0xaf, 0xcf, 0x90, + 0xbe, 0xd4, 0xc9, 0xb2, 0x6b, 0x49, 0xfb, 0x4d, 0x41, 0xe7, 0x27, 0x6a, 0xcc, 0x61, 0xbc, 0xde, + 0x91, 0xc7, 0xeb, 0xab, 0xc7, 0x88, 0xab, 0x64, 0xcc, 0x3e, 0x29, 0x8b, 0xaa, 0x3d, 0x7c, 0xb6, + 0xfe, 0xff, 0xde, 0x0f, 0xda, 0xdf, 0xf2, 0x33, 0x88, 0xd2, 0x39, 0x84, 0x21, 0x4f, 0x94, 0xca, + 0x91, 0x26, 0xca, 0xd8, 0xa0, 0xad, 0xce, 0x38, 0x68, 0x29, 0x3d, 0xde, 0xa0, 0xbd, 0x8b, 0x56, + 0xe4, 0xdb, 0xa7, 0x76, 0xc4, 0x6f, 0x3e, 0x41, 0xdd, 0x96, 0x6e, 0x27, 0x99, 0xa9, 0xf8, 0xf6, + 0xa0, 0xf4, 0xbf, 0xfc, 0xf6, 0xa0, 0xb4, 0xa4, 0x29, 0x7e, 0x91, 0xdf, 0x1e, 0x13, 0xf3, 0x3c, + 0xff, 0xb7, 0x07, 0xff, 0x94, 0xe6, 0x7f, 0x69, 0x48, 0xac, 0xf4, 0x0d, 0x99, 0x7d, 0x4a, 0xef, + 0xa5, 0x02, 0x3c, 0xc2, 0x68, 0xf7, 0x51, 0x5d, 0xfe, 0x0d, 0xe0, 0x58, 0x37, 0x5f, 0x0b, 0xd5, + 0x44, 0xe5, 0x0a, 0xae, 0xef, 0x10, 0x46, 0xb0, 0x90, 0x98, 0xe6, 0xe3, 0x67, 0xcd, 0x85, 0x27, + 0xcf, 0x9a, 0x0b, 0x4f, 0x9f, 0x35, 0x17, 0x1e, 0x0e, 0x9a, 0xca, 0xe3, 0x41, 0x53, 0x79, 0x32, + 0x68, 0x2a, 0x4f, 0x07, 0x4d, 0xe5, 0x8f, 0x41, 0x53, 0x79, 0xf4, 0x67, 0x73, 0xe1, 0xd3, 0x0b, + 0xd3, 0x7e, 0x31, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x67, 0xe4, 0xf6, 0x18, 0x69, 0x12, 0x00, + 0x00, +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Shareable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.AvailableOnNodes != nil { + { + size, err := 
m.AvailableOnNodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ResourceHandles) > 0 { + for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PotentialNodes) > 0 { + for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PotentialNodes[iNdEx]) + copy(dAtA[i:], m.PotentialNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.SelectedNode) + copy(dAtA[i:], m.SelectedNode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimParametersReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UnsuitableNodes) > 0 { + for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UnsuitableNodes[iNdEx]) + copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x1a + if m.ParametersRef != nil { + { + size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.ResourceClassName) + copy(dAtA[i:], m.ResourceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.DeallocationRequested { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SuitableNodes != nil { + { + size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ParametersRef != nil { + { + size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClassParametersReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceHandles) > 0 { + for _, e := range m.ResourceHandles { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AvailableOnNodes != nil { + l = m.AvailableOnNodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *PodSchedulingContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSchedulingContextList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SelectedNode) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PotentialNodes) > 0 { + for _, s := range m.PotentialNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimParametersReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimSchedulingStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UnsuitableNodes) > 0 { + for _, s := range m.UnsuitableNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParametersRef != nil { + l = m.ParametersRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParametersRef != nil { + l = m.ParametersRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SuitableNodes != nil { + l = m.SuitableNodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClassParametersReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + 
l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceHandle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceHandles := "[]ResourceHandle{" + for _, f := range this.ResourceHandles { + repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceHandles += "}" + s := strings.Join([]string{`&AllocationResult{`, + `ResourceHandles:` + repeatedStringForResourceHandles + `,`, + `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContext{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSchedulingContext{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSchedulingContextList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContextSpec{`, + `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, + `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaims += "}" + s := strings.Join([]string{`&PodSchedulingContextStatus{`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), 
`&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimParametersReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimParametersReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSchedulingStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `ResourceClassName:` + fmt.Sprintf("%v", this.ResourceClassName) + `,`, + `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + s := strings.Join([]string{`&ResourceClaimStatus{`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + 
strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, + `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClass", "ResourceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClassParametersReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClassParametersReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceHandle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHandle{`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) + if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableOnNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvailableOnNodes == nil { + m.AvailableOnNodes = &v1.NodeSelector{} + } + if err := m.AvailableOnNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Shareable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSchedulingContext{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectedNode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimParametersReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum 
{ + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParametersRef == nil { + m.ParametersRef = &ResourceClaimParametersReference{} + } + if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = AllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeallocationRequested = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, 
ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClass: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParametersRef == nil { + m.ParametersRef = &ResourceClassParametersReference{} + } + if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SuitableNodes == nil { + m.SuitableNodes = &v1.NodeSelector{} + } + if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClassParametersReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceHandle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto new file mode 100644 index 000000000..02412398c --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -0,0 +1,400 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1alpha2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1alpha2"; + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. + // + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. + // + // +listType=atomic + // +optional + repeated ResourceHandle resourceHandles = 1; + + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. + // + // Setting this field is optional. If null, the resource is available + // everywhere. + // +optional + optional k8s.io.api.core.v1.NodeSelector availableOnNodes = 2; + + // Shareable determines whether the resource supports more + // than one consumer at a time. + // +optional + optional bool shareable = 3; +} + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message PodSchedulingContext { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes where resources for the Pod are needed. + optional PodSchedulingContextSpec spec = 2; + + // Status describes where resources for the Pod can be allocated. + // +optional + optional PodSchedulingContextStatus status = 3; +} + +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { + // Standard list metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. 
+message PodSchedulingContextSpec { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // +optional + optional string selectedNode = 1; + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +listType=set + // +optional + repeated string potentialNodes = 2; +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + repeated ResourceClaimSchedulingStatus resourceClaims = 1; +} + +// ResourceClaim describes which resources are needed by a resource consumer. +// Its status tracks whether the resource has been allocated and what the +// resulting attributes are. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes the desired attributes of a resource that then needs + // to be allocated. It can only be set once when creating the + // ResourceClaim. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the resource is available and with which + // attributes. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + optional string resource = 3; + + // Name is the name of resource being referenced. + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimParametersReference contains enough information to let you +// locate the parameters for a ResourceClaim. The object must be in the same +// namespace as the ResourceClaim. +message ResourceClaimParametersReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Kind is the type of resource being referenced. This is the same + // value as in the parameter object's metadata, for example "ConfigMap". 
+ optional string kind = 2; + + // Name is the name of resource being referenced. + optional string name = 3; +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +message ResourceClaimSchedulingStatus { + // Name matches the pod.spec.resourceClaims[*].Name field. + // +optional + optional string name = 1; + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +listType=set + // +optional + repeated string unsuitableNodes = 2; +} + +// ResourceClaimSpec defines how a resource is to be allocated. +message ResourceClaimSpec { + // ResourceClassName references the driver and additional parameters + // via the name of a ResourceClass that was created as part of the + // driver deployment. + optional string resourceClassName = 1; + + // ParametersRef references a separate object with arbitrary parameters + // that will be used by the driver when allocating a resource for the + // claim. + // + // The object must be in the same namespace as the ResourceClaim. + // +optional + optional ResourceClaimParametersReference parametersRef = 2; + + // Allocation can start immediately or when a Pod wants to use the + // resource. "WaitForFirstConsumer" is the default. + // +optional + optional string allocationMode = 3; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the resulting attributes are. +message ResourceClaimStatus { + // DriverName is a copy of the driver name from the ResourceClass at + // the time when allocation started. + // +optional + optional string driverName = 1; + + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. + // +optional + optional AllocationResult allocation = 2; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +listType=map + // +listMapKey=uid + // +optional + repeated ResourceClaimConsumerReference reservedFor = 3; + + // DeallocationRequested indicates that a ResourceClaim is to be + // deallocated. + // + // The driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // While DeallocationRequested is set, no new consumers may be added to + // ReservedFor. + // +optional + optional bool deallocationRequested = 4; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. 
+message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourceClass is used by administrators to influence how resources +// are allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClass { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DriverName defines the name of the dynamic resource driver that is + // used for allocation of a ResourceClaim that uses this class. + // + // Resource drivers have a unique name in forward domain order + // (acme.example.com). + optional string driverName = 2; + + // ParametersRef references an arbitrary separate object that may hold + // parameters that will be used by the driver when allocating a + // resource that uses this class. A dynamic resource driver can + // distinguish between parameters stored here and and those stored in + // ResourceClaimSpec. + // +optional + optional ResourceClassParametersReference parametersRef = 3; + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a ResourceClaim that has not been allocated yet. + // + // Setting this field is optional. If null, all nodes are candidates. + // +optional + optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4; +} + +// ResourceClassList is a collection of classes. +message ResourceClassList { + // Standard list metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated ResourceClass items = 2; +} + +// ResourceClassParametersReference contains enough information to let you +// locate the parameters for a ResourceClass. +message ResourceClassParametersReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Kind is the type of resource being referenced. This is the same + // value as in the parameter object's metadata. + optional string kind = 2; + + // Name is the name of resource being referenced. + optional string name = 3; + + // Namespace that contains the referenced resource. Must be empty + // for cluster-scoped resources and non-empty for namespaced + // resources. + // +optional + optional string namespace = 4; +} + +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. 
+message ResourceHandle { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + optional string driverName = 1; + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + optional string data = 2; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha2/register.go b/vendor/k8s.io/api/resource/v1alpha2/register.go new file mode 100644 index 000000000..6e0d7ceb9 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/register.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "resource.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ResourceClass{}, + &ResourceClassList{}, + &ResourceClaim{}, + &ResourceClaimList{}, + &ResourceClaimTemplate{}, + &ResourceClaimTemplateList{}, + &PodSchedulingContext{}, + &PodSchedulingContextList{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go new file mode 100644 index 000000000..21936bfe3 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -0,0 +1,462 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaim describes which resources are needed by a resource consumer. +// Its status tracks whether the resource has been allocated and what the +// resulting attributes are. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes the desired attributes of a resource that then needs + // to be allocated. It can only be set once when creating the + // ResourceClaim. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the resource is available and with which + // attributes. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines how a resource is to be allocated. +type ResourceClaimSpec struct { + // ResourceClassName references the driver and additional parameters + // via the name of a ResourceClass that was created as part of the + // driver deployment. + ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"` + + // ParametersRef references a separate object with arbitrary parameters + // that will be used by the driver when allocating a resource for the + // claim. + // + // The object must be in the same namespace as the ResourceClaim. + // +optional + ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"` + + // Allocation can start immediately or when a Pod wants to use the + // resource. "WaitForFirstConsumer" is the default. + // +optional + AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"` +} + +// AllocationMode describes whether a ResourceClaim gets allocated immediately +// when it gets created (AllocationModeImmediate) or whether allocation is +// delayed until it is needed for a Pod +// (AllocationModeWaitForFirstConsumer). Other modes might get added in the +// future. +type AllocationMode string + +const ( + // When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is + // delayed until a Pod gets scheduled that needs the ResourceClaim. The + // scheduler will consider all resource requirements of that Pod and + // trigger allocation for a node that fits the Pod. + AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer" + + // When a ResourceClaim has AllocationModeImmediate, allocation starts + // as soon as the ResourceClaim gets created. 
This is done without + // considering the needs of Pods that will use the ResourceClaim + // because those Pods are not known yet. + AllocationModeImmediate AllocationMode = "Immediate" +) + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the resulting attributes are. +type ResourceClaimStatus struct { + // DriverName is a copy of the driver name from the ResourceClass at + // the time when allocation started. + // +optional + DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` + + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +listType=map + // +listMapKey=uid + // +optional + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"` + + // DeallocationRequested indicates that a ResourceClaim is to be + // deallocated. + // + // The driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // While DeallocationRequested is set, no new consumers may be added to + // ReservedFor. + // +optional + DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"` +} + +// ReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 32 + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. + // + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. + // + // +listType=atomic + // +optional + ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` + + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. + // + // Setting this field is optional. If null, the resource is available + // everywhere. + // +optional + AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"` + + // Shareable determines whether the resource supports more + // than one consumer at a time. 
+ // +optional + Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` +} + +// AllocationResultResourceHandlesMaxSize represents the maximum number of +// entries in allocation.resourceHandles. +const AllocationResultResourceHandlesMaxSize = 32 + +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +type ResourceHandle struct { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. +const ResourceHandleDataMaxSize = 16 * 1024 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. + Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type PodSchedulingContext struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes where resources for the Pod are needed. + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes where resources for the Pod can be allocated. + // +optional + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // +optional + SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. 
Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +listType=set + // +optional + PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` + + // If there ever is a need to support other kinds of resources + // than ResourceClaim, then new fields could get added here + // for those other resources. +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +type ResourceClaimSchedulingStatus struct { + // Name matches the pod.spec.resourceClaims[*].Name field. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +listType=set + // +optional + UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` +} + +// PodSchedulingNodeListMaxSize defines the maximum number of entries in the +// node lists that are stored in PodSchedulingContext objects. This limit is part +// of the API. +const PodSchedulingNodeListMaxSize = 128 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClass is used by administrators to influence how resources +// are allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DriverName defines the name of the dynamic resource driver that is + // used for allocation of a ResourceClaim that uses this class. + // + // Resource drivers have a unique name in forward domain order + // (acme.example.com). + DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"` + + // ParametersRef references an arbitrary separate object that may hold + // parameters that will be used by the driver when allocating a + // resource that uses this class. 
A dynamic resource driver can + // distinguish between parameters stored here and those stored in + // ResourceClaimSpec. + // +optional + ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"` + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a ResourceClaim that has not been allocated yet. + // + // Setting this field is optional. If null, all nodes are candidates. + // +optional + SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClassList is a collection of classes. +type ResourceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ResourceClassParametersReference contains enough information to let you +// locate the parameters for a ResourceClass. +type ResourceClassParametersReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced. This is the same + // value as in the parameter object's metadata. + Kind string `json:"kind" protobuf:"bytes,2,name=kind"` + // Name is the name of resource being referenced. + Name string `json:"name" protobuf:"bytes,3,name=name"` + // Namespace that contains the referenced resource. Must be empty + // for cluster-scoped resources and non-empty for namespaced + // resources. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// ResourceClaimParametersReference contains enough information to let you +// locate the parameters for a ResourceClaim. The object must be in the same +// namespace as the ResourceClaim. +type ResourceClaimParametersReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced. This is the same + // value as in the parameter object's metadata, for example "ConfigMap". + Kind string `json:"kind" protobuf:"bytes,2,name=kind"` + // Name is the name of resource being referenced. + Name string `json:"name" protobuf:"bytes,3,name=name"` +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources.
+ // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplateList is a collection of claim templates. +type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go new file mode 100644 index 000000000..474be8c85 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go @@ -0,0 +1,232 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models.
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", + "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes where resources for the Pod are needed.", + "status": "Status describes where resources for the Pod can be allocated.", +} + +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext +} + +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "metadata": "Standard list metadata", + "items": "Items is the list of PodSchedulingContext objects.", +} + +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList +} + +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", +} + +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec +} + +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", +} + +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", + "status": "Status describes whether the resource is available and with which attributes.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimParametersReference = map[string]string{ + "": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "kind": "Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", + "name": "Name is the name of resource being referenced.", +} + +func (ResourceClaimParametersReference) SwaggerDoc() map[string]string { + return map_ResourceClaimParametersReference +} + +var map_ResourceClaimSchedulingStatus = map[string]string{ + "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "name": "Name matches the pod.spec.resourceClaims[*].Name field.", + "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", +} + +func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimSchedulingStatus +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines how a resource is to be allocated.", + "resourceClassName": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", + "parametersRef": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", + "allocationMode": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", + "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", + "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. 
A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourceClass = map[string]string{ + "": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", + "parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and those stored in ResourceClaimSpec.", + "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.", +} + +func (ResourceClass) SwaggerDoc() map[string]string { + return map_ResourceClass +} + +var map_ResourceClassList = map[string]string{ + "": "ResourceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (ResourceClassList) SwaggerDoc() map[string]string { + return map_ResourceClassList +} + +var map_ResourceClassParametersReference = map[string]string{ + "": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", + "name": "Name is the name of resource being referenced.", + "namespace": "Namespace that contains the referenced resource.
Must be empty for cluster-scoped resources and non-empty for namespaced resources.", +} + +func (ResourceClassParametersReference) SwaggerDoc() map[string]string { + return map_ResourceClassParametersReference +} + +var map_ResourceHandle = map[string]string{ + "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", + "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", +} + +func (ResourceHandle) SwaggerDoc() map[string]string { + return map_ResourceHandle +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..89d521bf0 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,498 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + if in.ResourceHandles != nil { + in, out := &in.ResourceHandles, &out.ResourceHandles + *out = make([]ResourceHandle, len(*in)) + copy(*out, *in) + } + if in.AvailableOnNodes != nil { + in, out := &in.AvailableOnNodes, &out.AvailableOnNodes + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { + if in == nil { + return nil + } + out := new(PodSchedulingContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSchedulingContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { + if in == nil { + return nil + } + out := new(PodSchedulingContextList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { + *out = *in + if in.PotentialNodes != nil { + in, out := &in.PotentialNodes, &out.PotentialNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { + if in == nil { + return nil + } + out := new(PodSchedulingContextSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { + *out = *in + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]ResourceClaimSchedulingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { + if in == nil { + return nil + } + out := new(PodSchedulingContextStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference. +func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference { + if in == nil { + return nil + } + out := new(ResourceClaimParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { + *out = *in + if in.UnsuitableNodes != nil { + in, out := &in.UnsuitableNodes, &out.UnsuitableNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. 
+func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { + if in == nil { + return nil + } + out := new(ResourceClaimSchedulingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(ResourceClaimParametersReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClass) DeepCopyInto(out *ResourceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(ResourceClassParametersReference) + **out = **in + } + if in.SuitableNodes != nil { + in, out := &in.SuitableNodes, &out.SuitableNodes + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass. +func (in *ResourceClass) DeepCopy() *ResourceClass { + if in == nil { + return nil + } + out := new(ResourceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList. +func (in *ResourceClassList) DeepCopy() *ResourceClassList { + if in == nil { + return nil + } + out := new(ResourceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference. +func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference { + if in == nil { + return nil + } + out := new(ResourceClassParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. +func (in *ResourceHandle) DeepCopy() *ResourceHandle { + if in == nil { + return nil + } + out := new(ResourceHandle) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index afc090777..c1a27e8ba 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -37,7 +37,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,7 +54,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 0f2989424..146bae40d 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -34,7 +34,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index ac34c531f..f167e1970 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5c60b7ab4..f0878fb16 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 7b0df4864..26ba8ff5d 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,7 +52,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index fa25f969c..557005db6 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 44b49ea24..43878184d 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index e315e1b35..6f88592cf 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,7 +56,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index cbc140f44..f42008eb9 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index a648c426a..d36497432 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -609,111 +609,112 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1651 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xbd, 0x6f, 0x1b, 0xcb, - 0x11, 0xd7, 0x89, 0xd4, 0xd7, 0x52, 0xb2, 0xa4, 0x95, 0xe4, 0x30, 0x2a, 0x48, 0xe1, 0x6c, 0x24, - 0xb2, 0x13, 0x1f, 0x6d, 0xd9, 0x31, 0x0c, 0x07, 0x0e, 0xa0, 0x93, 0xe8, 0x58, 0x88, 0x28, 0x29, - 0x4b, 0xc5, 0x30, 0x82, 0x24, 0xf0, 0xea, 0x6e, 0x45, 0xad, 0xc5, 0xfb, 0xf0, 0xed, 0x92, 0x31, - 0x53, 0x25, 0x4d, 0xba, 0x00, 0x49, 0x1b, 0xe4, 0x8f, 0x48, 0x80, 0xa4, 0x49, 0x99, 0x22, 0x70, - 0x3a, 0x23, 0x95, 0x2b, 0xe2, 0x99, 0xaf, 0x7e, 0xaf, 0x7c, 0x85, 0xaa, 0x87, 0xdd, 0x5b, 0xf2, - 0x3e, 0x78, 0x94, 0xa5, 0x86, 0x1d, 0x77, 0x67, 0xe6, 0x37, 0xb3, 0x3b, 0xbf, 0x99, 0x9d, 0x23, - 0xf8, 0xc9, 0xf9, 0x13, 0x66, 0x50, 0xaf, 0x72, 0xde, 0x3a, 0x21, 0x81, 0x4b, 0x38, 0x61, 0x95, - 0x36, 0x71, 0x6d, 0x2f, 0xa8, 0x28, 0x01, 0xf6, 0x69, 0x85, 0x71, 0x2f, 0xc0, 0x0d, 0x52, 0x69, - 0x3f, 0xa8, 0x34, 0x88, 0x4b, 0x02, 0xcc, 0x89, 0x6d, 0xf8, 0x81, 0xc7, 0x3d, 0xb8, 0x16, 0xaa, - 0x19, 0xd8, 0xa7, 0x86, 0x52, 0x33, 0xda, 0x0f, 0xd6, 0xef, 0x35, 0x28, 0x3f, 0x6b, 0x9d, 0x18, - 0x96, 0xe7, 0x54, 0x1a, 0x5e, 0xc3, 0xab, 0x48, 0xed, 0x93, 0xd6, 0xa9, 0x5c, 0xc9, 0x85, 0xfc, - 0x15, 0xa2, 0xac, 0xeb, 0x31, 0x67, 0x96, 0x17, 0x64, 0x79, 0x5a, 0x7f, 0x14, 0xe9, 0x38, 0xd8, - 0x3a, 0xa3, 0x2e, 0x09, 0x3a, 0x15, 0xff, 0xbc, 0x21, 0x8d, 0x02, 0xc2, 0xbc, 0x56, 0x60, 0x91, - 0x6b, 0x59, 0xb1, 0x8a, 0x43, 0x38, 0xce, 0xf2, 0x55, 0x19, 0x65, 0x15, 0xb4, 0x5c, 0x4e, 0x9d, - 0x61, 0x37, 0x8f, 0x3f, 0x67, 0xc0, 0xac, 0x33, 0xe2, 0xe0, 0xb4, 0x9d, 0xfe, 0x2f, 0x0d, 0xcc, - 0xed, 0xd4, 0xf7, 0x76, 0x03, 0xda, 0x26, 0x01, 0x7c, 0x0d, 0x66, 0x45, 0x44, 0x36, 0xe6, 0xb8, - 0xa8, 0x6d, 0x68, 0x9b, 0x85, 0xad, 0xfb, 0x46, 0x74, 0xbf, 0x03, 0x60, 0xc3, 0x3f, 0x6f, 0x88, - 0x0d, 0x66, 0x08, 0x6d, 0xa3, 0xfd, 0xc0, 0x38, 0x3c, 0x79, 0x43, 0x2c, 0x5e, 0x23, 0x1c, 0x9b, - 0xf0, 0x7d, 0xb7, 0x3c, 0xd1, 0xeb, 0x96, 0x41, 0xb4, 0x87, 0x06, 0xa8, 0xf0, 0x39, 0xc8, 0x33, - 0x9f, 0x58, 0xc5, 0x49, 0x89, 0x7e, 0xdb, 0xc8, 0xcc, 0x9e, 0x31, 0x88, 0xa8, 0xee, 0x13, 0xcb, - 0x9c, 0x57, 0x88, 0x79, 0xb1, 0x42, 0xd2, 0x5e, 0xff, 0xa7, 0x06, 0x16, 0x06, 0x5a, 0xfb, 0x94, - 0x71, 0xf8, 0xab, 0xa1, 0xd8, 0x8d, 0xab, 0xc5, 0x2e, 0xac, 0x65, 0xe4, 0x4b, 0xca, 0xcf, 0x6c, - 0x7f, 0x27, 0x16, 0x77, 0x15, 0x4c, 0x51, 0x4e, 0x1c, 0x56, 0x9c, 0xdc, 0xc8, 0x6d, 0x16, 0xb6, - 0x36, 0x3e, 0x17, 0xb8, 0xb9, 0xa0, 0xc0, 0xa6, 0xf6, 0x84, 0x19, 0x0a, 0xad, 0xf5, 0xbf, 0xe5, - 0x63, 0x61, 0x8b, 0xe3, 0xc0, 0xa7, 0xe0, 0x06, 0xe6, 0x1c, 0x5b, 0x67, 0x88, 0xbc, 0x6d, 0xd1, - 0x80, 0xd8, 0x32, 0xf8, 0x59, 0x13, 0xf6, 0xba, 0xe5, 0x1b, 0xdb, 0x09, 0x09, 0x4a, 0x69, 0x0a, - 0x5b, 0xdf, 0xb3, 0xf7, 0xdc, 0x53, 0xef, 0xd0, 0xad, 0x79, 0x2d, 0x97, 0xcb, 0x6b, 0x55, 0xb6, - 0x47, 0x09, 0x09, 0x4a, 0x69, 0x42, 0x0b, 0xac, 0xb6, 0xbd, 0x66, 0xcb, 0x21, 0xfb, 0xf4, 0x94, - 0x58, 0x1d, 0xab, 0x49, 0x6a, 0x9e, 0x4d, 0x58, 0x31, 0xb7, 0x91, 0xdb, 0x9c, 0x33, 0x2b, 0xbd, - 0x6e, 0x79, 0xf5, 0x65, 0x86, 0xfc, 0xa2, 0x5b, 0x5e, 0xc9, 0xd8, 0x47, 0x99, 0x60, 0xf0, 0x19, - 0x58, 0x54, 0x97, 0xb3, 0x83, 0x7d, 
0x6c, 0x51, 0xde, 0x29, 0xe6, 0x65, 0x84, 0x2b, 0xbd, 0x6e, - 0x79, 0xb1, 0x9e, 0x14, 0xa1, 0xb4, 0x2e, 0x7c, 0x01, 0x16, 0x4e, 0xd9, 0x4f, 0x03, 0xaf, 0xe5, - 0x1f, 0x79, 0x4d, 0x6a, 0x75, 0x8a, 0x53, 0x1b, 0xda, 0xe6, 0x9c, 0xa9, 0xf7, 0xba, 0xe5, 0x85, - 0xe7, 0xf5, 0x98, 0xe0, 0x22, 0xbd, 0x81, 0x92, 0x86, 0xf0, 0x35, 0x58, 0xe0, 0xde, 0x39, 0x71, - 0xc5, 0xd5, 0x11, 0xc6, 0x59, 0x71, 0x5a, 0xa6, 0xf1, 0xd6, 0x88, 0x34, 0x1e, 0xc7, 0x74, 0xcd, - 0x35, 0x95, 0xc9, 0x85, 0xf8, 0x2e, 0x43, 0x49, 0x40, 0xb8, 0x03, 0x96, 0x83, 0x30, 0x2f, 0x0c, - 0x11, 0xbf, 0x75, 0xd2, 0xa4, 0xec, 0xac, 0x38, 0x23, 0x0f, 0xbb, 0xd6, 0xeb, 0x96, 0x97, 0x51, - 0x5a, 0x88, 0x86, 0xf5, 0xf5, 0x7f, 0x68, 0x60, 0x66, 0xa7, 0xbe, 0x77, 0xe0, 0xd9, 0x64, 0x0c, - 0xb5, 0xb8, 0x9b, 0xa8, 0x45, 0x7d, 0x34, 0xa5, 0x45, 0x3c, 0x23, 0x2b, 0xf1, 0xeb, 0xb0, 0x12, - 0x85, 0x8e, 0xea, 0x22, 0x1b, 0x20, 0xef, 0x62, 0x87, 0xc8, 0xa8, 0xe7, 0x22, 0x9b, 0x03, 0xec, - 0x10, 0x24, 0x25, 0xf0, 0x7b, 0x60, 0xda, 0xf5, 0x6c, 0xb2, 0xb7, 0x2b, 0x7d, 0xcf, 0x99, 0x37, - 0x94, 0xce, 0xf4, 0x81, 0xdc, 0x45, 0x4a, 0x0a, 0x1f, 0x81, 0x79, 0xee, 0xf9, 0x5e, 0xd3, 0x6b, - 0x74, 0x7e, 0x46, 0x3a, 0x7d, 0x72, 0x2e, 0xf5, 0xba, 0xe5, 0xf9, 0xe3, 0xd8, 0x3e, 0x4a, 0x68, - 0xc1, 0x5f, 0x83, 0x02, 0x6e, 0x36, 0x3d, 0x0b, 0x73, 0x7c, 0xd2, 0x24, 0x92, 0x71, 0x85, 0xad, - 0xbb, 0x23, 0x8e, 0x17, 0x92, 0x59, 0xf8, 0x45, 0xaa, 0x85, 0x33, 0x73, 0xb1, 0xd7, 0x2d, 0x17, - 0xb6, 0x23, 0x08, 0x14, 0xc7, 0xd3, 0xff, 0xae, 0x81, 0x82, 0x3a, 0xf0, 0x18, 0x1a, 0xcf, 0x4e, - 0xb2, 0xf1, 0x94, 0x2e, 0xcf, 0xd2, 0x88, 0xb6, 0xf3, 0x9b, 0x41, 0xc4, 0xb2, 0xe7, 0x1c, 0x82, - 0x19, 0x5b, 0xa6, 0x8a, 0x15, 0x35, 0x89, 0x7a, 0xfb, 0x72, 0x54, 0xd5, 0xd2, 0x16, 0x15, 0xf6, - 0x4c, 0xb8, 0x66, 0xa8, 0x8f, 0xa2, 0x7f, 0x93, 0x03, 0x70, 0xa7, 0xbe, 0x97, 0x2a, 0xe8, 0x31, - 0x50, 0x98, 0x82, 0x79, 0x41, 0x95, 0x3e, 0x19, 0x14, 0x95, 0x1f, 0x5e, 0xf1, 0xfe, 0xf1, 0x09, - 0x69, 0xd6, 0x49, 0x93, 0x58, 0xdc, 0x0b, 0x42, 0x56, 0x1d, 0xc4, 0xc0, 0x50, 0x02, 0x1a, 0xee, - 0x82, 0xa5, 0x7e, 0x7f, 0x6a, 0x62, 0xc6, 0x04, 0x9b, 0x8b, 0x39, 0xc9, 0xde, 0xa2, 0x0a, 0x71, - 0xa9, 0x9e, 0x92, 0xa3, 0x21, 0x0b, 0xf8, 0x0a, 0xcc, 0x5a, 0xf1, 0x56, 0xf8, 0x19, 0xb2, 0x18, - 0xfd, 0xb9, 0xc2, 0xf8, 0x79, 0x0b, 0xbb, 0x9c, 0xf2, 0x8e, 0x39, 0x2f, 0x88, 0x32, 0xe8, 0x99, - 0x03, 0x34, 0xc8, 0xc0, 0xb2, 0x83, 0xdf, 0x51, 0xa7, 0xe5, 0x84, 0x94, 0xae, 0xd3, 0xdf, 0x11, - 0xd9, 0x30, 0xaf, 0xef, 0x42, 0x36, 0xac, 0x5a, 0x1a, 0x0c, 0x0d, 0xe3, 0xeb, 0xff, 0xd5, 0xc0, - 0xcd, 0xe1, 0xc4, 0x8f, 0xa1, 0x2c, 0x0e, 0x92, 0x65, 0x71, 0x67, 0x34, 0x81, 0x53, 0xb1, 0x8d, - 0xa8, 0x90, 0x3f, 0x4d, 0x83, 0xf9, 0x78, 0xfa, 0xc6, 0xc0, 0xdd, 0x1f, 0x81, 0x82, 0x1f, 0x78, - 0x6d, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x75, 0xc2, 0x15, 0x65, 0x52, 0x38, 0x8a, 0x44, 0x28, 0xae, - 0x07, 0x1b, 0x00, 0xf8, 0x38, 0xc0, 0x0e, 0xe1, 0xa2, 0x7e, 0x73, 0xf2, 0xf8, 0x0f, 0x47, 0x1c, - 0x3f, 0x7e, 0x22, 0xe3, 0x68, 0x60, 0x55, 0x75, 0x79, 0xd0, 0x89, 0xa2, 0x8b, 0x04, 0x28, 0x06, - 0x0d, 0xcf, 0xc1, 0x42, 0x40, 0xac, 0x26, 0xa6, 0x8e, 0x7a, 0x7d, 0xf3, 0x32, 0xc2, 0xaa, 0x78, - 0x0a, 0x51, 0x5c, 0x70, 0xd1, 0x2d, 0xdf, 0x1f, 0x9e, 0x9f, 0x8d, 0x23, 0x12, 0x30, 0xca, 0x38, - 0x71, 0x79, 0x48, 0x98, 0x84, 0x0d, 0x4a, 0x62, 0x8b, 0x4e, 0xef, 0x88, 0xb9, 0xe4, 0xd0, 0xe7, - 0xd4, 0x73, 0x59, 0x71, 0x2a, 0xea, 0xf4, 0xb5, 0xd8, 0x3e, 0x4a, 0x68, 0xc1, 0x7d, 0xb0, 0x2a, - 0x3a, 0xf3, 0x6f, 0x43, 0x07, 0xd5, 0x77, 0x3e, 0x76, 0xc5, 0x2d, 0x15, 0xa7, 0xe5, 0xbb, 0x5b, - 0x14, 0x43, 0xcc, 0x76, 0x86, 0x1c, 0x65, 0x5a, 0xc1, 0x57, 
0x60, 0x39, 0x9c, 0x62, 0x4c, 0xea, - 0xda, 0xd4, 0x6d, 0x88, 0x19, 0x46, 0x3e, 0xe1, 0x73, 0xe6, 0x5d, 0x51, 0x11, 0x2f, 0xd3, 0xc2, - 0x8b, 0xac, 0x4d, 0x34, 0x0c, 0x02, 0xdf, 0x82, 0x65, 0xe9, 0x91, 0xd8, 0xaa, 0x9d, 0x50, 0xc2, - 0x8a, 0xb3, 0x32, 0x75, 0x9b, 0xf1, 0xd4, 0x89, 0xab, 0x0b, 0xe7, 0x8f, 0xb0, 0xe9, 0xf4, 0x9b, - 0xd3, 0x31, 0x09, 0x1c, 0xf3, 0xbb, 0x2a, 0x5f, 0xcb, 0xdb, 0x69, 0x28, 0x34, 0x8c, 0xbe, 0xfe, - 0x0c, 0x2c, 0xa6, 0x12, 0x0e, 0x97, 0x40, 0xee, 0x9c, 0x74, 0xc2, 0x67, 0x19, 0x89, 0x9f, 0x70, - 0x15, 0x4c, 0xb5, 0x71, 0xb3, 0x45, 0x42, 0xf2, 0xa1, 0x70, 0xf1, 0x74, 0xf2, 0x89, 0xa6, 0xff, - 0x5b, 0x03, 0x89, 0x76, 0x36, 0x86, 0x92, 0x7e, 0x91, 0x2c, 0xe9, 0x5b, 0x57, 0xe0, 0xf4, 0x88, - 0x62, 0xfe, 0x83, 0x06, 0xe6, 0xe3, 0xc3, 0x1a, 0xfc, 0x21, 0x98, 0xc5, 0x2d, 0x9b, 0x12, 0xd7, - 0xea, 0x4f, 0x25, 0x83, 0x40, 0xb6, 0xd5, 0x3e, 0x1a, 0x68, 0x88, 0x51, 0x8e, 0xbc, 0xf3, 0x69, - 0x80, 0x05, 0xc9, 0xea, 0xc4, 0xf2, 0x5c, 0x9b, 0xc9, 0x1b, 0xca, 0x85, 0x9d, 0xb1, 0x9a, 0x16, - 0xa2, 0x61, 0x7d, 0xfd, 0xaf, 0x93, 0x60, 0x29, 0xe4, 0x46, 0x38, 0xc4, 0x3b, 0xc4, 0xe5, 0x63, - 0x68, 0x2a, 0xb5, 0xc4, 0x4c, 0xf7, 0x83, 0x4b, 0x87, 0x9e, 0x28, 0xb0, 0x51, 0xc3, 0x1d, 0xfc, - 0x05, 0x98, 0x66, 0x1c, 0xf3, 0x16, 0x93, 0x4f, 0x5d, 0x61, 0xeb, 0xde, 0x55, 0x01, 0xa5, 0x51, - 0x34, 0xd7, 0x85, 0x6b, 0xa4, 0xc0, 0xf4, 0xff, 0x68, 0x60, 0x35, 0x6d, 0x32, 0x06, 0x86, 0xed, - 0x27, 0x19, 0xf6, 0xfd, 0x2b, 0x1e, 0x66, 0x04, 0xcb, 0xfe, 0xaf, 0x81, 0x9b, 0x43, 0xe7, 0x96, - 0x2f, 0xa9, 0xe8, 0x4b, 0x7e, 0xaa, 0xfb, 0x1d, 0x44, 0x13, 0xb1, 0xec, 0x4b, 0x47, 0x19, 0x72, - 0x94, 0x69, 0x05, 0xdf, 0x80, 0x25, 0xea, 0x36, 0xa9, 0x4b, 0xd4, 0xc3, 0x1b, 0xe5, 0x37, 0xb3, - 0x79, 0xa4, 0x91, 0x65, 0x72, 0x57, 0xc5, 0x7c, 0xb2, 0x97, 0x42, 0x41, 0x43, 0xb8, 0xfa, 0xff, - 0x32, 0x32, 0x23, 0x67, 0x46, 0x51, 0x42, 0x72, 0x87, 0x04, 0x43, 0x25, 0xa4, 0xf6, 0xd1, 0x40, - 0x43, 0xf2, 0x46, 0x5e, 0x85, 0x0a, 0xf4, 0xca, 0xbc, 0x91, 0x46, 0x31, 0xde, 0xc8, 0x35, 0x52, - 0x60, 0x22, 0x08, 0x31, 0x93, 0xc5, 0x66, 0xaf, 0x41, 0x10, 0x07, 0x6a, 0x1f, 0x0d, 0x34, 0xf4, - 0xaf, 0x72, 0x19, 0x09, 0x92, 0x04, 0x8c, 0x9d, 0xa6, 0xff, 0xbd, 0x9d, 0x3e, 0x8d, 0x3d, 0x38, - 0x8d, 0x0d, 0xff, 0xa2, 0x01, 0x88, 0x07, 0x10, 0xb5, 0x3e, 0x41, 0x43, 0x16, 0x55, 0xaf, 0x55, - 0x12, 0xc6, 0xf6, 0x10, 0x4e, 0xf8, 0x1a, 0xaf, 0x2b, 0xff, 0x70, 0x58, 0x01, 0x65, 0x38, 0x87, - 0x36, 0x28, 0x84, 0xbb, 0xd5, 0x20, 0xf0, 0x02, 0x55, 0x9e, 0xfa, 0xa5, 0xb1, 0x48, 0x4d, 0xb3, - 0x24, 0x3f, 0x6e, 0x22, 0xd3, 0x8b, 0x6e, 0xb9, 0x10, 0x93, 0xa3, 0x38, 0xac, 0xf0, 0x62, 0x93, - 0xc8, 0x4b, 0xfe, 0x7a, 0x5e, 0x76, 0xc9, 0x68, 0x2f, 0x31, 0xd8, 0xf5, 0x2a, 0xf8, 0xce, 0x88, - 0x6b, 0xb9, 0xd6, 0x9b, 0xf5, 0x47, 0x0d, 0xc4, 0x7d, 0xc0, 0x7d, 0x90, 0xe7, 0x54, 0x55, 0x5d, - 0xf2, 0x03, 0xf0, 0x92, 0x46, 0x72, 0x4c, 0x1d, 0x12, 0xb5, 0x42, 0xb1, 0x42, 0x12, 0x05, 0xde, - 0x01, 0x33, 0x0e, 0x61, 0x0c, 0x37, 0x94, 0xe7, 0xe8, 0x73, 0xa8, 0x16, 0x6e, 0xa3, 0xbe, 0x5c, - 0x7f, 0x0c, 0x56, 0x32, 0x3e, 0x2b, 0x61, 0x19, 0x4c, 0x59, 0xf2, 0x5f, 0x1a, 0x11, 0xd0, 0x94, - 0x39, 0x27, 0x3a, 0xca, 0x8e, 0xfc, 0x73, 0x26, 0xdc, 0x37, 0x7f, 0xfc, 0xfe, 0x53, 0x69, 0xe2, - 0xc3, 0xa7, 0xd2, 0xc4, 0xc7, 0x4f, 0xa5, 0x89, 0xdf, 0xf7, 0x4a, 0xda, 0xfb, 0x5e, 0x49, 0xfb, - 0xd0, 0x2b, 0x69, 0x1f, 0x7b, 0x25, 0xed, 0x8b, 0x5e, 0x49, 0xfb, 0xf3, 0x97, 0xa5, 0x89, 0x5f, - 0xae, 0x65, 0xfe, 0x31, 0xfa, 0x6d, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x6e, 0x72, 0x7b, 0x49, - 0x15, 0x00, 0x00, + // 1670 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x73, 0x1b, 0x4b, + 0x11, 0xf7, 0x5a, 0xf2, 0xd7, 0xc8, 0x8e, 0xed, 0xb1, 0xfd, 0x10, 0x3e, 0x48, 0xae, 0x7d, 0xaf, + 0xc0, 0xef, 0xc1, 0x5b, 0xbd, 0x38, 0x21, 0x95, 0x0a, 0x15, 0xaa, 0xbc, 0xb6, 0x42, 0x5c, 0x58, + 0xb6, 0x19, 0x99, 0x54, 0x8a, 0x02, 0x2a, 0xe3, 0xdd, 0xb1, 0x3c, 0xb1, 0xf6, 0x23, 0x3b, 0xb3, + 0xc2, 0xe2, 0x04, 0x17, 0x6e, 0x54, 0xc1, 0x95, 0xbf, 0x02, 0xaa, 0xe0, 0xc2, 0x91, 0x03, 0x15, + 0x6e, 0x29, 0x4e, 0x39, 0xa9, 0x88, 0x38, 0xc3, 0x91, 0x83, 0x4f, 0xaf, 0x66, 0x76, 0xa4, 0xfd, + 0xd0, 0xca, 0xb1, 0x2f, 0xba, 0x69, 0xa6, 0xbb, 0x7f, 0xdd, 0x33, 0xdd, 0xfd, 0x9b, 0x5e, 0x81, + 0x1f, 0x5c, 0x3e, 0x66, 0x06, 0xf5, 0x6a, 0x97, 0xe1, 0x19, 0x09, 0x5c, 0xc2, 0x09, 0xab, 0x75, + 0x88, 0x6b, 0x7b, 0x41, 0x4d, 0x09, 0xb0, 0x4f, 0x6b, 0x8c, 0x7b, 0x01, 0x6e, 0x91, 0x5a, 0xe7, + 0x7e, 0xad, 0x45, 0x5c, 0x12, 0x60, 0x4e, 0x6c, 0xc3, 0x0f, 0x3c, 0xee, 0xc1, 0x8d, 0x48, 0xcd, + 0xc0, 0x3e, 0x35, 0x94, 0x9a, 0xd1, 0xb9, 0xbf, 0xf9, 0x65, 0x8b, 0xf2, 0x8b, 0xf0, 0xcc, 0xb0, + 0x3c, 0xa7, 0xd6, 0xf2, 0x5a, 0x5e, 0x4d, 0x6a, 0x9f, 0x85, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0x57, + 0x84, 0xb2, 0xa9, 0x27, 0x9c, 0x59, 0x5e, 0x90, 0xe7, 0x69, 0xf3, 0x61, 0xac, 0xe3, 0x60, 0xeb, + 0x82, 0xba, 0x24, 0xe8, 0xd6, 0xfc, 0xcb, 0x96, 0x34, 0x0a, 0x08, 0xf3, 0xc2, 0xc0, 0x22, 0x77, + 0xb2, 0x62, 0x35, 0x87, 0x70, 0x9c, 0xe7, 0xab, 0x36, 0xce, 0x2a, 0x08, 0x5d, 0x4e, 0x9d, 0x51, + 0x37, 0x8f, 0x3e, 0x66, 0xc0, 0xac, 0x0b, 0xe2, 0xe0, 0xac, 0x9d, 0xfe, 0x57, 0x0d, 0x2c, 0xec, + 0x35, 0x0f, 0xf6, 0x03, 0xda, 0x21, 0x01, 0x7c, 0x05, 0xe6, 0x45, 0x44, 0x36, 0xe6, 0xb8, 0xac, + 0x6d, 0x69, 0xdb, 0xa5, 0x9d, 0xaf, 0x8c, 0xf8, 0x7e, 0x87, 0xc0, 0x86, 0x7f, 0xd9, 0x12, 0x1b, + 0xcc, 0x10, 0xda, 0x46, 0xe7, 0xbe, 0x71, 0x7c, 0xf6, 0x9a, 0x58, 0xbc, 0x41, 0x38, 0x36, 0xe1, + 0xdb, 0x5e, 0x75, 0xaa, 0xdf, 0xab, 0x82, 0x78, 0x0f, 0x0d, 0x51, 0xe1, 0x33, 0x50, 0x64, 0x3e, + 0xb1, 0xca, 0xd3, 0x12, 0xfd, 0x33, 0x23, 0x37, 0x7b, 0xc6, 0x30, 0xa2, 0xa6, 0x4f, 0x2c, 0x73, + 0x51, 0x21, 0x16, 0xc5, 0x0a, 0x49, 0x7b, 0xfd, 0x2f, 0x1a, 0x58, 0x1a, 0x6a, 0x1d, 0x52, 0xc6, + 0xe1, 0xcf, 0x46, 0x62, 0x37, 0x6e, 0x17, 0xbb, 0xb0, 0x96, 0x91, 0xaf, 0x28, 0x3f, 0xf3, 0x83, + 0x9d, 0x44, 0xdc, 0x75, 0x30, 0x43, 0x39, 0x71, 0x58, 0x79, 0x7a, 0xab, 0xb0, 0x5d, 0xda, 0xd9, + 0xfa, 0x58, 0xe0, 0xe6, 0x92, 0x02, 0x9b, 0x39, 0x10, 0x66, 0x28, 0xb2, 0xd6, 0xff, 0x55, 0x4c, + 0x84, 0x2d, 0x8e, 0x03, 0x9f, 0x80, 0x7b, 0x98, 0x73, 0x6c, 0x5d, 0x20, 0xf2, 0x26, 0xa4, 0x01, + 0xb1, 0x65, 0xf0, 0xf3, 0x26, 0xec, 0xf7, 0xaa, 0xf7, 0x76, 0x53, 0x12, 0x94, 0xd1, 0x14, 0xb6, + 0xbe, 0x67, 0x1f, 0xb8, 0xe7, 0xde, 0xb1, 0xdb, 0xf0, 0x42, 0x97, 0xcb, 0x6b, 0x55, 0xb6, 0x27, + 0x29, 0x09, 0xca, 0x68, 0x42, 0x0b, 0xac, 0x77, 0xbc, 0x76, 0xe8, 0x90, 0x43, 0x7a, 0x4e, 0xac, + 0xae, 0xd5, 0x26, 0x0d, 0xcf, 0x26, 0xac, 0x5c, 0xd8, 0x2a, 0x6c, 0x2f, 0x98, 0xb5, 0x7e, 0xaf, + 0xba, 0xfe, 0x22, 0x47, 0x7e, 0xdd, 0xab, 0xae, 0xe5, 0xec, 0xa3, 0x5c, 0x30, 0xf8, 0x14, 0x2c, + 0xab, 0xcb, 0xd9, 0xc3, 0x3e, 0xb6, 0x28, 0xef, 0x96, 0x8b, 0x32, 0xc2, 0xb5, 0x7e, 0xaf, 0xba, + 0xdc, 0x4c, 0x8b, 0x50, 0x56, 0x17, 0x3e, 0x07, 0x4b, 0xe7, 0xec, 0x87, 0x81, 0x17, 0xfa, 0x27, + 0x5e, 0x9b, 0x5a, 0xdd, 0xf2, 0xcc, 0x96, 0xb6, 0xbd, 0x60, 0xea, 0xfd, 0x5e, 0x75, 0xe9, 0x59, + 0x33, 0x21, 0xb8, 0xce, 0x6e, 0xa0, 0xb4, 0x21, 0x7c, 0x05, 0x96, 0xb8, 0x77, 0x49, 0x5c, 0x71, + 0x75, 0x84, 0x71, 0x56, 0x9e, 0x95, 0x69, 0xfc, 0x74, 0x4c, 0x1a, 0x4f, 0x13, 0xba, 0xe6, 0x86, + 0xca, 0xe4, 0x52, 0x72, 0x97, 0xa1, 0x34, 
0x20, 0xdc, 0x03, 0xab, 0x41, 0x94, 0x17, 0x86, 0x88, + 0x1f, 0x9e, 0xb5, 0x29, 0xbb, 0x28, 0xcf, 0xc9, 0xc3, 0x6e, 0xf4, 0x7b, 0xd5, 0x55, 0x94, 0x15, + 0xa2, 0x51, 0x7d, 0xf8, 0x10, 0x2c, 0x32, 0x72, 0x48, 0xdd, 0xf0, 0x2a, 0x4a, 0xe7, 0xbc, 0xb4, + 0x5f, 0xe9, 0xf7, 0xaa, 0x8b, 0xcd, 0x7a, 0xbc, 0x8f, 0x52, 0x5a, 0xfa, 0x9f, 0x35, 0x30, 0xb7, + 0xd7, 0x3c, 0x38, 0xf2, 0x6c, 0x32, 0x81, 0x0e, 0xde, 0x4f, 0x75, 0xb0, 0x3e, 0xbe, 0x11, 0x44, + 0x3c, 0x63, 0xfb, 0xf7, 0x7f, 0x51, 0xff, 0x0a, 0x1d, 0xc5, 0x3d, 0x5b, 0xa0, 0xe8, 0x62, 0x87, + 0xc8, 0xa8, 0x17, 0x62, 0x9b, 0x23, 0xec, 0x10, 0x24, 0x25, 0xf0, 0x5b, 0x60, 0xd6, 0xf5, 0x6c, + 0x72, 0xb0, 0x2f, 0x7d, 0x2f, 0x98, 0xf7, 0x94, 0xce, 0xec, 0x91, 0xdc, 0x45, 0x4a, 0x2a, 0x6e, + 0x91, 0x7b, 0xbe, 0xd7, 0xf6, 0x5a, 0xdd, 0x1f, 0x91, 0xee, 0xa0, 0xa4, 0xe5, 0x2d, 0x9e, 0x26, + 0xf6, 0x51, 0x4a, 0x0b, 0xfe, 0x1c, 0x94, 0x70, 0xbb, 0xed, 0x59, 0x98, 0xe3, 0xb3, 0x36, 0x91, + 0x75, 0x5a, 0xda, 0xf9, 0x62, 0xcc, 0xf1, 0xa2, 0x16, 0x10, 0x7e, 0x91, 0x22, 0x7e, 0x66, 0x2e, + 0xf7, 0x7b, 0xd5, 0xd2, 0x6e, 0x0c, 0x81, 0x92, 0x78, 0xfa, 0x9f, 0x34, 0x50, 0x52, 0x07, 0x9e, + 0x00, 0x5d, 0xed, 0xa5, 0xe9, 0xaa, 0x72, 0x73, 0x96, 0xc6, 0x90, 0xd5, 0x2f, 0x86, 0x11, 0x4b, + 0xa6, 0x3a, 0x06, 0x73, 0xb6, 0x4c, 0x15, 0x2b, 0x6b, 0x12, 0xf5, 0xb3, 0x9b, 0x51, 0x15, 0x11, + 0x2e, 0x2b, 0xec, 0xb9, 0x68, 0xcd, 0xd0, 0x00, 0x45, 0xff, 0x7f, 0x01, 0xc0, 0xbd, 0xe6, 0x41, + 0x86, 0x06, 0x26, 0x50, 0xc2, 0x14, 0x2c, 0x8a, 0x52, 0x19, 0x14, 0x83, 0x2a, 0xe5, 0x07, 0xb7, + 0xbc, 0x7f, 0x7c, 0x46, 0xda, 0x4d, 0xd2, 0x26, 0x16, 0xf7, 0x82, 0xa8, 0xaa, 0x8e, 0x12, 0x60, + 0x28, 0x05, 0x0d, 0xf7, 0xc1, 0xca, 0x80, 0xd5, 0xda, 0x98, 0x31, 0x51, 0xcd, 0xe5, 0x82, 0xac, + 0xde, 0xb2, 0x0a, 0x71, 0xa5, 0x99, 0x91, 0xa3, 0x11, 0x0b, 0xf8, 0x12, 0xcc, 0x5b, 0x49, 0x02, + 0xfd, 0x48, 0xb1, 0x18, 0x83, 0x69, 0xc4, 0xf8, 0x71, 0x88, 0x5d, 0x4e, 0x79, 0xd7, 0x5c, 0x14, + 0x85, 0x32, 0x64, 0xda, 0x21, 0x1a, 0x64, 0x60, 0xd5, 0xc1, 0x57, 0xd4, 0x09, 0x9d, 0xa8, 0xa4, + 0x9b, 0xf4, 0x57, 0x44, 0xd2, 0xec, 0xdd, 0x5d, 0x48, 0x9a, 0x6b, 0x64, 0xc1, 0xd0, 0x28, 0xbe, + 0xfe, 0x0f, 0x0d, 0x7c, 0x32, 0x9a, 0xf8, 0x09, 0xb4, 0xc5, 0x51, 0xba, 0x2d, 0x3e, 0x1f, 0x5f, + 0xc0, 0x99, 0xd8, 0xc6, 0x74, 0xc8, 0xef, 0x66, 0xc1, 0x62, 0x32, 0x7d, 0x13, 0xa8, 0xdd, 0xef, + 0x81, 0x92, 0x1f, 0x78, 0x1d, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x31, 0xe1, 0x9a, 0x32, 0x29, 0x9d, + 0xc4, 0x22, 0x94, 0xd4, 0x83, 0x2d, 0x00, 0x7c, 0x1c, 0x60, 0x87, 0x70, 0xd1, 0xbf, 0x05, 0x79, + 0xfc, 0x07, 0x63, 0x8e, 0x9f, 0x3c, 0x91, 0x71, 0x32, 0xb4, 0xaa, 0xbb, 0x3c, 0xe8, 0xc6, 0xd1, + 0xc5, 0x02, 0x94, 0x80, 0x86, 0x97, 0x60, 0x29, 0x20, 0x56, 0x1b, 0x53, 0x47, 0xbd, 0xd9, 0x45, + 0x19, 0x61, 0x5d, 0x3c, 0xa0, 0x28, 0x29, 0xb8, 0xee, 0x55, 0xbf, 0x1a, 0x9d, 0xba, 0x8d, 0x13, + 0x12, 0x30, 0xca, 0x38, 0x71, 0x79, 0x54, 0x30, 0x29, 0x1b, 0x94, 0xc6, 0x16, 0x4c, 0xef, 0x88, + 0x27, 0xf0, 0xd8, 0xe7, 0xd4, 0x73, 0x59, 0x79, 0x26, 0x66, 0xfa, 0x46, 0x62, 0x1f, 0xa5, 0xb4, + 0xe0, 0x21, 0x58, 0x17, 0xcc, 0xfc, 0xcb, 0xc8, 0x41, 0xfd, 0xca, 0xc7, 0xae, 0xb8, 0xa5, 0xf2, + 0xac, 0x7c, 0x6d, 0xcb, 0x62, 0xf4, 0xd9, 0xcd, 0x91, 0xa3, 0x5c, 0x2b, 0xf8, 0x12, 0xac, 0x46, + 0xb3, 0x8f, 0x49, 0x5d, 0x9b, 0xba, 0x2d, 0x31, 0xf9, 0xc8, 0x87, 0x7f, 0xc1, 0xfc, 0x42, 0x74, + 0xc4, 0x8b, 0xac, 0xf0, 0x3a, 0x6f, 0x13, 0x8d, 0x82, 0xc0, 0x37, 0x60, 0x55, 0x7a, 0x24, 0xb6, + 0xa2, 0x13, 0x4a, 0x58, 0x79, 0x5e, 0xa6, 0x6e, 0x3b, 0x99, 0x3a, 0x71, 0x75, 0xd1, 0xd4, 0x12, + 0x91, 0xce, 0x80, 0x9c, 0x4e, 0x49, 0xe0, 0x98, 0xdf, 0x54, 0xf9, 
0x5a, 0xdd, 0xcd, 0x42, 0xa1, + 0x51, 0xf4, 0xcd, 0xa7, 0x60, 0x39, 0x93, 0x70, 0xb8, 0x02, 0x0a, 0x97, 0xa4, 0x1b, 0x3d, 0xcb, + 0x48, 0xfc, 0x84, 0xeb, 0x60, 0xa6, 0x83, 0xdb, 0x21, 0x89, 0x8a, 0x0f, 0x45, 0x8b, 0x27, 0xd3, + 0x8f, 0x35, 0xfd, 0x6f, 0x1a, 0x48, 0xd1, 0xd9, 0x04, 0x5a, 0xfa, 0x79, 0xba, 0xa5, 0x3f, 0xbd, + 0x45, 0x4d, 0x8f, 0x69, 0xe6, 0xdf, 0x68, 0x60, 0x31, 0x39, 0xe2, 0xc1, 0xef, 0x82, 0x79, 0x1c, + 0xda, 0x94, 0xb8, 0xd6, 0x60, 0x2a, 0x19, 0x06, 0xb2, 0xab, 0xf6, 0xd1, 0x50, 0x43, 0x0c, 0x80, + 0xe4, 0xca, 0xa7, 0x01, 0x16, 0x45, 0xd6, 0x24, 0x96, 0xe7, 0xda, 0x4c, 0xde, 0x50, 0x21, 0x62, + 0xc6, 0x7a, 0x56, 0x88, 0x46, 0xf5, 0xf5, 0x3f, 0x4e, 0x83, 0x95, 0xa8, 0x36, 0xa2, 0xd1, 0xdf, + 0x21, 0x2e, 0x9f, 0x00, 0xa9, 0x34, 0x52, 0x33, 0xdd, 0x77, 0x6e, 0x1c, 0x7a, 0xe2, 0xc0, 0xc6, + 0x0d, 0x77, 0xf0, 0x27, 0x60, 0x96, 0x71, 0xcc, 0x43, 0x26, 0x9f, 0xba, 0xd2, 0xce, 0x97, 0xb7, + 0x05, 0x94, 0x46, 0xf1, 0x5c, 0x17, 0xad, 0x91, 0x02, 0xd3, 0xff, 0xae, 0x81, 0xf5, 0xac, 0xc9, + 0x04, 0x2a, 0xec, 0x30, 0x5d, 0x61, 0xdf, 0xbe, 0xe5, 0x61, 0xc6, 0x7d, 0x01, 0x6a, 0xe0, 0x93, + 0x91, 0x73, 0xcb, 0x97, 0x54, 0xf0, 0x92, 0x9f, 0x61, 0xbf, 0xa3, 0x78, 0x22, 0x96, 0xbc, 0x74, + 0x92, 0x23, 0x47, 0xb9, 0x56, 0xf0, 0x35, 0x58, 0xa1, 0x6e, 0x9b, 0xba, 0x44, 0x3d, 0xbc, 0x71, + 0x7e, 0x73, 0xc9, 0x23, 0x8b, 0x2c, 0x93, 0xbb, 0x2e, 0xe6, 0x93, 0x83, 0x0c, 0x0a, 0x1a, 0xc1, + 0xd5, 0xff, 0x99, 0x93, 0x19, 0x39, 0x33, 0x8a, 0x16, 0x92, 0x3b, 0x24, 0x18, 0x69, 0x21, 0xb5, + 0x8f, 0x86, 0x1a, 0xb2, 0x6e, 0xe4, 0x55, 0xa8, 0x40, 0x6f, 0x5d, 0x37, 0xd2, 0x28, 0x51, 0x37, + 0x72, 0x8d, 0x14, 0x98, 0x08, 0x42, 0xcc, 0x64, 0x89, 0xd9, 0x6b, 0x18, 0xc4, 0x91, 0xda, 0x47, + 0x43, 0x0d, 0xfd, 0xbf, 0x85, 0x9c, 0x04, 0xc9, 0x02, 0x4c, 0x9c, 0x66, 0xf0, 0x95, 0x9e, 0x3d, + 0x8d, 0x3d, 0x3c, 0x8d, 0x0d, 0xff, 0xa0, 0x01, 0x88, 0x87, 0x10, 0x8d, 0x41, 0x81, 0x46, 0x55, + 0x54, 0xbf, 0x53, 0x4b, 0x18, 0xbb, 0x23, 0x38, 0xd1, 0x6b, 0xbc, 0xa9, 0xfc, 0xc3, 0x51, 0x05, + 0x94, 0xe3, 0x1c, 0xda, 0xa0, 0x14, 0xed, 0xd6, 0x83, 0xc0, 0x0b, 0x54, 0x7b, 0xea, 0x37, 0xc6, + 0x22, 0x35, 0xcd, 0x8a, 0xfc, 0xb8, 0x89, 0x4d, 0xaf, 0x7b, 0xd5, 0x52, 0x42, 0x8e, 0x92, 0xb0, + 0xc2, 0x8b, 0x4d, 0x62, 0x2f, 0xc5, 0xbb, 0x79, 0xd9, 0x27, 0xe3, 0xbd, 0x24, 0x60, 0x37, 0xeb, + 0xe0, 0x1b, 0x63, 0xae, 0xe5, 0x4e, 0x6f, 0xd6, 0x6f, 0x35, 0x90, 0xf4, 0x01, 0x0f, 0x41, 0x91, + 0x53, 0xd5, 0x75, 0xe9, 0x0f, 0xc0, 0x1b, 0x88, 0xe4, 0x94, 0x3a, 0x24, 0xa6, 0x42, 0xb1, 0x42, + 0x12, 0x05, 0x7e, 0x0e, 0xe6, 0x1c, 0xc2, 0x18, 0x6e, 0x29, 0xcf, 0xf1, 0xe7, 0x50, 0x23, 0xda, + 0x46, 0x03, 0xb9, 0xfe, 0x08, 0xac, 0xe5, 0x7c, 0x56, 0xc2, 0x2a, 0x98, 0xb1, 0xe4, 0x9f, 0x01, + 0x22, 0xa0, 0x19, 0x73, 0x41, 0x30, 0xca, 0x9e, 0xfc, 0x17, 0x20, 0xda, 0x37, 0xbf, 0xff, 0xf6, + 0x43, 0x65, 0xea, 0xdd, 0x87, 0xca, 0xd4, 0xfb, 0x0f, 0x95, 0xa9, 0x5f, 0xf7, 0x2b, 0xda, 0xdb, + 0x7e, 0x45, 0x7b, 0xd7, 0xaf, 0x68, 0xef, 0xfb, 0x15, 0xed, 0xdf, 0xfd, 0x8a, 0xf6, 0xfb, 0xff, + 0x54, 0xa6, 0x7e, 0xba, 0x91, 0xfb, 0x77, 0xea, 0xd7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xf9, + 0xe3, 0xd5, 0x7f, 0x15, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -826,6 +827,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SELinuxMount != nil { + i-- + if *m.SELinuxMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } if m.RequiresRepublish != nil { i-- if *m.RequiresRepublish { @@ -1795,6 +1806,9 @@ func (m *CSIDriverSpec) Size() (n int) { if 
m.RequiresRepublish != nil { n += 2 } + if m.SELinuxMount != nil { + n += 2 + } return n } @@ -2148,6 +2162,7 @@ func (this *CSIDriverSpec) String() string { `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, `TokenRequests:` + repeatedStringForTokenRequests + `,`, `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, + `SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`, `}`, }, "") return s @@ -2844,6 +2859,27 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RequiresRepublish = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SELinuxMount = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 45417116a..5f8eccaef 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -46,7 +46,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -79,16 +79,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -110,29 +109,27 @@ message CSIDriverSpec { optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. 
+ // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +146,7 @@ message CSIDriverSpec { // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +156,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -192,6 +190,28 @@ message CSIDriverSpec { // // +optional optional bool requiresRepublish = 7; + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. 
+ // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + optional bool seLinuxMount = 8; } // CSINode holds information about all CSI drivers installed on a node. @@ -204,6 +224,7 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -213,7 +234,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -293,11 +314,11 @@ message CSINodeSpec { // the scheduler assumes that capacity is insufficient and tries some other // node. message CSIStorageCapacity { - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -305,7 +326,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -314,7 +335,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -322,7 +343,7 @@ message CSIStorageCapacity { // This field is immutable. 
optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -334,7 +355,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -356,7 +377,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -373,36 +394,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -418,17 +439,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. 
message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -445,11 +466,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -463,7 +484,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -472,7 +493,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -488,39 +509,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
// +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -529,11 +550,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -542,7 +563,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 4812287ab..c785f368e 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,41 +33,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. 
// +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -81,12 +82,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,11 +124,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -138,25 +140,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. 
Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -165,7 +168,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -181,26 +184,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -209,11 +212,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -242,7 +245,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. 
Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -279,16 +282,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -310,29 +312,27 @@ type CSIDriverSpec struct { PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. 
// // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -349,7 +349,7 @@ type CSIDriverSpec struct { // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -359,10 +359,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -382,7 +383,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -392,6 +393,28 @@ type CSIDriverSpec struct { // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -432,12 +455,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. 
- // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -481,6 +503,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -499,7 +522,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -536,7 +559,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -588,11 +611,12 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -600,7 +624,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -609,7 +633,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). 
If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -617,7 +641,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -629,7 +653,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -649,12 +673,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 7a407db46..c92a7f95a 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. 
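For illustration, a minimal sketch of a CSIStorageCapacity object as a driver deployment might publish it, using the fields above; the namespace, zone label, and sizes are hypothetical:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	capacity := resource.MustParse("100Gi")
	maxVolumeSize := resource.MustParse("20Gi")

	csc := storagev1.CSIStorageCapacity{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "csisc-",          // naming pattern recommended in the comments above
			Namespace:    "example-storage", // hypothetical namespace; these objects are namespaced
		},
		NodeTopology: &metav1.LabelSelector{
			MatchLabels: map[string]string{"topology.kubernetes.io/zone": "us-east1"},
		},
		StorageClassName:  "standard",
		Capacity:          &capacity,
		MaximumVolumeSize: &maxVolumeSize,
	}
	fmt.Println(csc.StorageClassName, csc.Capacity.String())
}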
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,12 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. 
This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -64,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -74,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. 
When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -105,11 +106,11 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", - "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. 
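For illustration, a sketch of the CSINode shape those fields describe, as the kubelet would populate it during plugin registration; the driver name, node ID, topology keys, and volume limit are hypothetical:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	count := int32(39) // hypothetical per-node volume limit reported by the driver

	csiNode := storagev1.CSINode{
		ObjectMeta: metav1.ObjectMeta{Name: "node1"}, // must equal the Kubernetes node name
		Spec: storagev1.CSINodeSpec{
			Drivers: []storagev1.CSINodeDriver{{
				Name:         "example.csi.vendor.io", // hypothetical; must match GetPluginName()
				NodeID:       "nodeA",                 // the storage system's own name for this node
				TopologyKeys: []string{"company.com/zone", "company.com/region"},
				Allocatable:  &storagev1.VolumeNodeResources{Count: &count},
			}},
		},
	}
	fmt.Println(csiNode.Name, csiNode.Spec.Drivers[0].NodeID)
}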
If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -119,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -129,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. 
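For illustration, a sketch of a StorageClass exercising the fields whose docs are updated above; the provisioner name, parameters, and topology values are hypothetical:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := corev1.PersistentVolumeReclaimDelete
	binding := storagev1.VolumeBindingWaitForFirstConsumer
	expansion := true

	sc := storagev1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "standard"},
		Provisioner:          "example.csi.vendor.io", // hypothetical provisioner name
		Parameters:           map[string]string{"type": "ssd"},
		ReclaimPolicy:        &reclaim,
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &expansion,
		VolumeBindingMode:    &binding,
		AllowedTopologies: []corev1.TopologySelectorTerm{{
			MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
				Key:    "topology.kubernetes.io/zone",
				Values: []string{"us-east1"},
			}},
		}},
	}
	fmt.Println(sc.Name, sc.Provisioner)
}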
An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -145,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -154,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -165,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -176,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -185,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -194,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -205,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -217,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. 
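For illustration, a sketch of a VolumeAttachment request and the status fields the external-attacher fills in, per the field docs above; the object, volume, and node names are hypothetical:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-example" // hypothetical persistent volume name

	va := storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-attachment-example"},
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "example.csi.vendor.io", // must be the name returned by GetPluginName()
			Source:   storagev1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
			NodeName: "node1",
		},
	}

	// Only the entity completing the attach (the external-attacher) should set status.
	va.Status = storagev1.VolumeAttachmentStatus{
		Attached:           true,
		AttachmentMetadata: map[string]string{"devicePath": "/dev/sdb"}, // hypothetical metadata
	}
	fmt.Println(va.Name, va.Status.Attached)
}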
This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -227,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index b14b7fbcd..74ae83bca 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -127,6 +127,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.SELinuxMount != nil { + in, out := &in.SELinuxMount, &out.SELinuxMount + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index a53451226..88250a0f0 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -67,7 +67,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -76,7 +76,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -84,7 +84,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. 
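The generated DeepCopyInto hunk above copies the new SELinuxMount pointer rather than sharing it; here is a stand-alone sketch of that same pattern (the spec type is a hypothetical stand-in, not the real CSIDriverSpec):

package main

import "fmt"

// spec is a hypothetical stand-in for CSIDriverSpec with one optional field.
type spec struct {
	SELinuxMount *bool
}

// deepCopyInto copies in into out so the two no longer share the pointer,
// mirroring the generated DeepCopyInto pattern above.
func (in *spec) deepCopyInto(out *spec) {
	*out = *in
	if in.SELinuxMount != nil {
		in, out := &in.SELinuxMount, &out.SELinuxMount
		*out = new(bool)
		**out = **in
	}
}

func main() {
	v := true
	src := spec{SELinuxMount: &v}
	var dst spec
	src.deepCopyInto(&dst)

	*src.SELinuxMount = false      // mutating the source...
	fmt.Println(*dst.SELinuxMount) // ...leaves the copy untouched: prints true
}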
// @@ -96,7 +96,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -118,7 +118,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -134,11 +134,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -152,7 +152,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -161,7 +161,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -177,39 +177,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. 
+ // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -218,11 +218,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index fe8c9e3cd..59ef348a3 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. 
+ // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -174,6 +175,7 @@ type VolumeError struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -186,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. 
This field is @@ -195,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -203,7 +205,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -215,7 +217,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -238,12 +240,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index a228a3fec..ba6afbd59 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). 
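For illustration, a simplified sketch of the capacity check described in the CSIStorageCapacity doc text above (prefer MaximumVolumeSize, fall back to Capacity, otherwise assume insufficient); this is a reading of the comment, not the actual kube-scheduler implementation:

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hasEnoughCapacity reports whether one CSIStorageCapacity object can satisfy a
// pending volume of the requested size: compare MaximumVolumeSize first, fall
// back to Capacity, and treat "neither set" as insufficient.
func hasEnoughCapacity(csc *storagev1alpha1.CSIStorageCapacity, requested resource.Quantity) bool {
	if csc.MaximumVolumeSize != nil {
		return csc.MaximumVolumeSize.Cmp(requested) >= 0
	}
	if csc.Capacity != nil {
		return csc.Capacity.Cmp(requested) >= 0
	}
	return false
}

func main() {
	max := resource.MustParse("10Gi")
	csc := &storagev1alpha1.CSIStorageCapacity{MaximumVolumeSize: &max}
	fmt.Println(hasEnoughCapacity(csc, resource.MustParse("5Gi")))  // true
	fmt.Println(hasEnoughCapacity(csc, resource.MustParse("20Gi"))) // false
}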
If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,10 +93,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -105,8 +105,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 379ce8f18..42ef65ca0 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -609,111 +609,112 @@ func init() { } var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1654 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x37, - 0x16, 0xf7, 0x58, 0xf2, 0x17, 0x65, 0xc7, 0x36, 0xed, 0x64, 0xb5, 0x3a, 0x48, 0x86, 0x16, 0xbb, - 0x71, 0x82, 0xec, 0x28, 0xf1, 0x66, 0x83, 0x20, 0x40, 0x80, 0xf5, 0xd8, 0xde, 0x8d, 0x12, 0xcb, - 0x71, 0x28, 0x23, 0x08, 0x82, 0x3d, 0x2c, 0x35, 0x43, 0xcb, 0x8c, 0x35, 0x1f, 0x19, 0x52, 0xde, - 0xa8, 0xa7, 0xf6, 0xd2, 0x73, 0xd1, 0x43, 0xef, 0x05, 0xfa, 0x2f, 0xb4, 0x40, 0x7b, 0xe9, 0xb1, - 0x01, 0x0a, 0x14, 0x41, 0x4f, 0x39, 0x09, 0x8d, 0xfa, 0x27, 0x14, 0xe8, 0xc1, 0xe8, 0xa1, 0x20, - 0x87, 0xd2, 0x7c, 0x49, 0xb1, 0xdd, 0x83, 0x6e, 0xe2, 0xfb, 0xf8, 0xbd, 0x47, 0xf2, 0xf7, 0x1e, - 0xdf, 0x08, 0x6c, 0x1d, 0xdf, 0x65, 0x3a, 0x75, 0x2b, 0xc7, 0xed, 0x06, 0xf1, 0x1d, 0xc2, 0x09, - 0xab, 0x9c, 0x10, 0xc7, 0x72, 0xfd, 0x8a, 0x52, 0x60, 0x8f, 0x56, 0x18, 0x77, 0x7d, 0xdc, 0x24, - 0x95, 0x93, 0x5b, 0x0d, 0xc2, 0xf1, 0xad, 0x4a, 0x93, 0x38, 0xc4, 0xc7, 0x9c, 0x58, 0xba, 0xe7, - 0xbb, 0xdc, 0x85, 0x85, 0xc0, 0x56, 0xc7, 0x1e, 0xd5, 0x95, 0xad, 0xae, 0x6c, 0x0b, 0x7f, 0x6f, - 0x52, 0x7e, 0xd4, 0x6e, 0xe8, 0xa6, 0x6b, 0x57, 0x9a, 0x6e, 0xd3, 0xad, 0x48, 0x97, 0x46, 0xfb, - 0x50, 0xae, 0xe4, 0x42, 0xfe, 0x0a, 0xa0, 0x0a, 0xe5, 0x48, 0x58, 0xd3, 0xf5, 0x45, 0xcc, 0x64, - 0xb8, 0xc2, 0xed, 0xd0, 0xc6, 0xc6, 0xe6, 0x11, 0x75, 0x88, 0xdf, 0xa9, 0x78, 0xc7, 0x4d, 0xe9, - 0xe4, 0x13, 0xe6, 0xb6, 0x7d, 0x93, 0x5c, 0xc8, 0x8b, 0x55, 0x6c, 0xc2, 0xf1, 0xb0, 0x58, 0x95, - 0x51, 0x5e, 0x7e, 0xdb, 0xe1, 0xd4, 0x4e, 0x87, 0xb9, 0x73, 0x96, 0x03, 0x33, 0x8f, 0x88, 0x8d, - 0x93, 0x7e, 0xe5, 0x6f, 0x34, 0x30, 0xb7, 0x55, 0xaf, 0x6e, 0xfb, 0xf4, 0x84, 0xf8, 0xf0, 0x7f, - 0x60, 0x56, 0x64, 0x64, 0x61, 0x8e, 0xf3, 0xda, 0x9a, 0xb6, 0x9e, 0xdb, 0xb8, 0xa9, 0x87, 0x87, - 0x3c, 0x00, 0xd6, 0xbd, 0xe3, 0xa6, 0x10, 0x30, 0x5d, 0x58, 0xeb, 0x27, 0xb7, 0xf4, 0xc7, 0x8d, - 0x17, 0xc4, 0xe4, 0x35, 0xc2, 0xb1, 0x01, 0x5f, 0x77, 0x4b, 0x13, 0xbd, 0x6e, 0x09, 0x84, 0x32, - 0x34, 0x40, 0x85, 0x8f, 0x40, 0x96, 0x79, 0xc4, 0xcc, 0x4f, 0x4a, 0xf4, 0x6b, 0xfa, 0xe8, 0x2b, - 0xd4, 0x07, 0x69, 0xd5, 0x3d, 0x62, 0x1a, 0xf3, 0x0a, 0x36, 0x2b, 0x56, 0x48, 0x82, 0x94, 0xbf, - 0xd6, 0xc0, 0xc2, 0xc0, 0x6a, 0x97, 0x32, 0x0e, 0xff, 0x9b, 0xda, 0x80, 0x7e, 0xbe, 0x0d, 0x08, - 0x6f, 0x99, 0xfe, 0x92, 0x8a, 0x33, 0xdb, 0x97, 0x44, 0x92, 0x7f, 0x08, 0xa6, 0x28, 0x27, 0x36, - 0xcb, 0x4f, 0xae, 0x65, 0xd6, 0x73, 0x1b, 0x7f, 0x3d, 0x57, 0xf6, 0xc6, 0x82, 0x42, 0x9c, 0xaa, - 0x0a, 0x5f, 0x14, 0x40, 0x94, 0x3f, 0xcf, 0x46, 0x72, 0x17, 0x7b, 0x82, 0xf7, 0xc0, 0x25, 0xcc, - 0x39, 0x36, 0x8f, 0x10, 0x79, 0xd9, 0xa6, 0x3e, 0xb1, 0xe4, 0x0e, 0x66, 0x0d, 0xd8, 0xeb, 0x96, - 0x2e, 0x6d, 0xc6, 0x34, 0x28, 0x61, 0x29, 0x7c, 0x3d, 0xd7, 0xaa, 0x3a, 0x87, 0xee, 0x63, 0xa7, - 0xe6, 0xb6, 0x1d, 0x2e, 0x0f, 0x58, 0xf9, 0xee, 
0xc7, 0x34, 0x28, 0x61, 0x09, 0x4d, 0xb0, 0x7a, - 0xe2, 0xb6, 0xda, 0x36, 0xd9, 0xa5, 0x87, 0xc4, 0xec, 0x98, 0x2d, 0x52, 0x73, 0x2d, 0xc2, 0xf2, - 0x99, 0xb5, 0xcc, 0xfa, 0x9c, 0x51, 0xe9, 0x75, 0x4b, 0xab, 0x4f, 0x87, 0xe8, 0x4f, 0xbb, 0xa5, - 0x95, 0x21, 0x72, 0x34, 0x14, 0x0c, 0xde, 0x07, 0x8b, 0xea, 0x84, 0xb6, 0xb0, 0x87, 0x4d, 0xca, - 0x3b, 0xf9, 0xac, 0xcc, 0x70, 0xa5, 0xd7, 0x2d, 0x2d, 0xd6, 0xe3, 0x2a, 0x94, 0xb4, 0x85, 0x0f, - 0xc0, 0xc2, 0x21, 0xfb, 0x8f, 0xef, 0xb6, 0xbd, 0x7d, 0xb7, 0x45, 0xcd, 0x4e, 0x7e, 0x6a, 0x4d, - 0x5b, 0x9f, 0x33, 0xca, 0xbd, 0x6e, 0x69, 0xe1, 0xdf, 0xf5, 0x88, 0xe2, 0x34, 0x29, 0x40, 0x71, - 0x47, 0x48, 0xc0, 0x02, 0x77, 0x8f, 0x89, 0x23, 0x8e, 0x8e, 0x30, 0xce, 0xf2, 0xd3, 0xf2, 0x2e, - 0xd7, 0xdf, 0x77, 0x97, 0x07, 0x11, 0x07, 0xe3, 0xb2, 0xba, 0xce, 0x85, 0xa8, 0x94, 0xa1, 0x38, - 0x2a, 0xdc, 0x02, 0xcb, 0x7e, 0x70, 0x39, 0x0c, 0x11, 0xaf, 0xdd, 0x68, 0x51, 0x76, 0x94, 0x9f, - 0x91, 0x3b, 0xbe, 0xdc, 0xeb, 0x96, 0x96, 0x51, 0x52, 0x89, 0xd2, 0xf6, 0xe5, 0xaf, 0x34, 0x30, - 0xb3, 0x55, 0xaf, 0xee, 0xb9, 0x16, 0x19, 0x43, 0x69, 0x56, 0x63, 0xa5, 0x79, 0xf5, 0x0c, 0x72, - 0x8b, 0xa4, 0x46, 0x16, 0xe6, 0x2f, 0x41, 0x61, 0x0a, 0x1b, 0xd5, 0x59, 0xd6, 0x40, 0xd6, 0xc1, - 0x36, 0x91, 0xa9, 0xcf, 0x85, 0x3e, 0x7b, 0xd8, 0x26, 0x48, 0x6a, 0xe0, 0xdf, 0xc0, 0xb4, 0xe3, - 0x5a, 0xa4, 0xba, 0x2d, 0x13, 0x98, 0x33, 0x2e, 0x29, 0x9b, 0xe9, 0x3d, 0x29, 0x45, 0x4a, 0x0b, - 0x6f, 0x83, 0x79, 0xee, 0x7a, 0x6e, 0xcb, 0x6d, 0x76, 0x1e, 0x91, 0x4e, 0x9f, 0xa6, 0x4b, 0xbd, - 0x6e, 0x69, 0xfe, 0x20, 0x22, 0x47, 0x31, 0x2b, 0xd8, 0x00, 0x39, 0xdc, 0x6a, 0xb9, 0x26, 0xe6, - 0xb8, 0xd1, 0x22, 0x92, 0x7b, 0xb9, 0x8d, 0xca, 0xfb, 0xf6, 0x18, 0x70, 0x5b, 0x04, 0x47, 0xaa, - 0xb7, 0x33, 0x63, 0xb1, 0xd7, 0x2d, 0xe5, 0x36, 0x43, 0x1c, 0x14, 0x05, 0x2d, 0x7f, 0xa9, 0x81, - 0x9c, 0xda, 0xf5, 0x18, 0x9a, 0xd1, 0x83, 0x78, 0x33, 0xfa, 0xcb, 0x39, 0xee, 0x6b, 0x44, 0x2b, - 0x32, 0x07, 0x69, 0xcb, 0x3e, 0x74, 0x00, 0x66, 0x2c, 0x79, 0x69, 0x2c, 0xaf, 0x49, 0xe8, 0x6b, - 0xe7, 0x80, 0x56, 0xbd, 0x6e, 0x51, 0x05, 0x98, 0x09, 0xd6, 0x0c, 0xf5, 0xa1, 0xca, 0xbf, 0x66, - 0x00, 0xdc, 0xaa, 0x57, 0x13, 0x95, 0x3e, 0x06, 0x5a, 0x53, 0x30, 0x2f, 0x98, 0xd3, 0xe7, 0x86, - 0xa2, 0xf7, 0x3f, 0xce, 0x79, 0x13, 0xb8, 0x41, 0x5a, 0x75, 0xd2, 0x22, 0x26, 0x77, 0xfd, 0x80, - 0x64, 0x7b, 0x11, 0x30, 0x14, 0x83, 0x86, 0xdb, 0x60, 0xa9, 0xdf, 0xb8, 0x5a, 0x98, 0x31, 0x41, - 0xee, 0x7c, 0x46, 0x92, 0x39, 0xaf, 0x52, 0x5c, 0xaa, 0x27, 0xf4, 0x28, 0xe5, 0x01, 0x9f, 0x81, - 0x59, 0x33, 0xda, 0x23, 0xcf, 0xa0, 0x8d, 0xde, 0x1f, 0x3d, 0xf4, 0x27, 0x6d, 0xec, 0x70, 0xca, - 0x3b, 0xc6, 0xbc, 0xa0, 0xcc, 0xa0, 0x99, 0x0e, 0xd0, 0x20, 0x03, 0xcb, 0x36, 0x7e, 0x45, 0xed, - 0xb6, 0x1d, 0x90, 0xbb, 0x4e, 0x3f, 0x20, 0xb2, 0x93, 0x5e, 0x3c, 0x84, 0x6c, 0x62, 0xb5, 0x24, - 0x18, 0x4a, 0xe3, 0x97, 0xbf, 0xd7, 0xc0, 0x95, 0xf4, 0xc5, 0x8f, 0xa1, 0x40, 0xea, 0xf1, 0x02, - 0xd1, 0xcf, 0x60, 0x71, 0x22, 0xc1, 0x11, 0xb5, 0xf2, 0xe9, 0x34, 0x98, 0x8f, 0xde, 0xe1, 0x18, - 0x08, 0xfc, 0x4f, 0x90, 0xf3, 0x7c, 0xf7, 0x84, 0x32, 0xea, 0x3a, 0xc4, 0x57, 0xdd, 0x71, 0x45, - 0xb9, 0xe4, 0xf6, 0x43, 0x15, 0x8a, 0xda, 0xc1, 0x16, 0x00, 0x1e, 0xf6, 0xb1, 0x4d, 0xb8, 0xa8, - 0xe4, 0x8c, 0x3c, 0x83, 0xbb, 0xef, 0x3b, 0x83, 0xe8, 0xb6, 0xf4, 0xfd, 0x81, 0xeb, 0x8e, 0xc3, - 0xfd, 0x4e, 0x98, 0x62, 0xa8, 0x40, 0x11, 0x7c, 0x78, 0x0c, 0x16, 0x7c, 0x62, 0xb6, 0x30, 0xb5, - 0xd5, 0x03, 0x9d, 0x95, 0x69, 0xee, 0x88, 0x87, 0x12, 0x45, 0x15, 0xa7, 0xdd, 0xd2, 0xcd, 0xf4, - 0xb0, 0xad, 0xef, 0x13, 0x9f, 0x51, 0xc6, 0x89, 0xc3, 0x03, 0xea, 0xc4, 
0x7c, 0x50, 0x1c, 0x5b, - 0x3c, 0x01, 0xb6, 0x18, 0x5d, 0x1e, 0x7b, 0x9c, 0xba, 0x0e, 0xcb, 0x4f, 0x85, 0x4f, 0x40, 0x2d, - 0x22, 0x47, 0x31, 0x2b, 0xb8, 0x0b, 0x56, 0x45, 0xb7, 0xfe, 0x7f, 0x10, 0x60, 0xe7, 0x95, 0x87, - 0x1d, 0x71, 0x54, 0xf9, 0x69, 0xf9, 0x2a, 0xe7, 0xc5, 0x9c, 0xb3, 0x39, 0x44, 0x8f, 0x86, 0x7a, - 0xc1, 0x67, 0x60, 0x39, 0x18, 0x74, 0x0c, 0xea, 0x58, 0xd4, 0x69, 0x8a, 0x31, 0x47, 0x3e, 0xf0, - 0x73, 0xc6, 0x75, 0x51, 0x1b, 0x4f, 0x93, 0xca, 0xd3, 0x61, 0x42, 0x94, 0x06, 0x81, 0x2f, 0xc1, - 0xb2, 0x8c, 0x48, 0x2c, 0xd5, 0x58, 0x28, 0x61, 0xf9, 0xd9, 0xf4, 0x94, 0x22, 0x8e, 0x4e, 0x10, - 0xa9, 0xdf, 0x7e, 0xfa, 0x6d, 0xea, 0x80, 0xf8, 0xb6, 0xf1, 0x67, 0x75, 0x5f, 0xcb, 0x9b, 0x49, - 0x28, 0x94, 0x46, 0x2f, 0xdc, 0x07, 0x8b, 0x89, 0x0b, 0x87, 0x4b, 0x20, 0x73, 0x4c, 0x3a, 0xc1, - 0x7b, 0x8d, 0xc4, 0x4f, 0xb8, 0x0a, 0xa6, 0x4e, 0x70, 0xab, 0x4d, 0x02, 0x06, 0xa2, 0x60, 0x71, - 0x6f, 0xf2, 0xae, 0x56, 0xfe, 0x56, 0x03, 0xb1, 0xc6, 0x36, 0x86, 0xe2, 0xae, 0xc5, 0x8b, 0x7b, - 0xfd, 0xbc, 0xc4, 0x1e, 0x51, 0xd6, 0x1f, 0x69, 0x60, 0x3e, 0x3a, 0xcf, 0xc1, 0x1b, 0x60, 0x16, - 0xb7, 0x2d, 0x4a, 0x1c, 0xb3, 0x3f, 0xb3, 0x0c, 0xb2, 0xd9, 0x54, 0x72, 0x34, 0xb0, 0x10, 0xd3, - 0x1e, 0x79, 0xe5, 0x51, 0x1f, 0x0b, 0xa6, 0xd5, 0x89, 0xe9, 0x3a, 0x16, 0x93, 0xc7, 0x94, 0x09, - 0x1a, 0xe5, 0x4e, 0x52, 0x89, 0xd2, 0xf6, 0xe5, 0x2f, 0x26, 0xc1, 0x52, 0x40, 0x90, 0x60, 0xd8, - 0xb7, 0x89, 0xc3, 0xc7, 0xd0, 0x5e, 0x50, 0x6c, 0xec, 0xbb, 0x79, 0xf6, 0x48, 0x14, 0x66, 0x37, - 0x6a, 0xfe, 0x83, 0xcf, 0xc1, 0x34, 0xe3, 0x98, 0xb7, 0x99, 0x7c, 0xfe, 0x72, 0x1b, 0x1b, 0x17, - 0x42, 0x95, 0x9e, 0xe1, 0xfc, 0x17, 0xac, 0x91, 0x42, 0x2c, 0x7f, 0xa7, 0x81, 0xd5, 0xa4, 0xcb, - 0x18, 0x08, 0xf7, 0x24, 0x4e, 0xb8, 0x1b, 0x17, 0xd9, 0xd1, 0x08, 0xd2, 0xfd, 0xa8, 0x81, 0x2b, - 0xa9, 0xcd, 0xcb, 0x77, 0x56, 0xf4, 0x2a, 0x2f, 0xd1, 0x11, 0xf7, 0xc2, 0xf1, 0x59, 0xf6, 0xaa, - 0xfd, 0x21, 0x7a, 0x34, 0xd4, 0x0b, 0xbe, 0x00, 0x4b, 0xd4, 0x69, 0x51, 0x87, 0xa8, 0x67, 0x39, - 0xbc, 0xee, 0xa1, 0x0d, 0x25, 0x89, 0x2c, 0xaf, 0x79, 0x55, 0x4c, 0x2f, 0xd5, 0x04, 0x0a, 0x4a, - 0xe1, 0x96, 0x7f, 0x18, 0x72, 0x3d, 0x72, 0xac, 0x14, 0x15, 0x25, 0x25, 0xc4, 0x4f, 0x55, 0x94, - 0x92, 0xa3, 0x81, 0x85, 0x64, 0x90, 0x3c, 0x0a, 0x95, 0xe8, 0xc5, 0x18, 0x24, 0x3d, 0x23, 0x0c, - 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x26, 0x62, 0x6c, 0x8b, 0x8c, 0x67, 0x83, 0x4c, 0xf6, 0x94, 0x1c, - 0x0d, 0x2c, 0xca, 0xbf, 0x65, 0x86, 0xdc, 0x92, 0xa4, 0x62, 0x64, 0x4b, 0xfd, 0x6f, 0xf5, 0xe4, - 0x96, 0xac, 0xc1, 0x96, 0x2c, 0xf8, 0x99, 0x06, 0x20, 0x1e, 0x40, 0xd4, 0xfa, 0x54, 0x0d, 0xf8, - 0xf4, 0xf0, 0xe2, 0x15, 0xa2, 0x6f, 0xa6, 0xc0, 0x82, 0xb7, 0xba, 0xa0, 0x92, 0x80, 0x69, 0x03, - 0x34, 0x24, 0x03, 0x48, 0x41, 0x2e, 0x90, 0xee, 0xf8, 0xbe, 0xeb, 0xab, 0x92, 0xbd, 0x7a, 0x76, - 0x42, 0xd2, 0xdc, 0x28, 0xca, 0x6f, 0xa2, 0xd0, 0xff, 0xb4, 0x5b, 0xca, 0x45, 0xf4, 0x28, 0x8a, - 0x2d, 0x42, 0x59, 0x24, 0x0c, 0x95, 0xfd, 0x03, 0xa1, 0xb6, 0xc9, 0xe8, 0x50, 0x11, 0xec, 0xc2, - 0x0e, 0xf8, 0xd3, 0x88, 0x03, 0xba, 0xd0, 0xdb, 0xf6, 0xb1, 0x06, 0xa2, 0x31, 0xe0, 0x2e, 0xc8, - 0x72, 0xaa, 0x2a, 0x31, 0xb7, 0x71, 0xfd, 0x7c, 0x1d, 0xe6, 0x80, 0xda, 0x24, 0x6c, 0x94, 0x62, - 0x85, 0x24, 0x0a, 0xbc, 0x06, 0x66, 0x6c, 0xc2, 0x18, 0x6e, 0xaa, 0xc8, 0xe1, 0x07, 0x54, 0x2d, - 0x10, 0xa3, 0xbe, 0xbe, 0x7c, 0x07, 0xac, 0x0c, 0xf9, 0x24, 0x85, 0x25, 0x30, 0x65, 0xca, 0x3f, - 0x7c, 0x44, 0x42, 0x53, 0xc6, 0x9c, 0xe8, 0x32, 0x5b, 0xf2, 0x7f, 0x9e, 0x40, 0x6e, 0xfc, 0xeb, - 0xf5, 0xbb, 0xe2, 0xc4, 0x9b, 0x77, 0xc5, 0x89, 0xb7, 0xef, 0x8a, 0x13, 0x1f, 0xf6, 0x8a, 0xda, - 
0xeb, 0x5e, 0x51, 0x7b, 0xd3, 0x2b, 0x6a, 0x6f, 0x7b, 0x45, 0xed, 0xa7, 0x5e, 0x51, 0xfb, 0xe4, - 0xe7, 0xe2, 0xc4, 0xf3, 0xc2, 0xe8, 0xff, 0x5d, 0x7f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x91, 0x4f, - 0x2f, 0xc0, 0xad, 0x15, 0x00, 0x00, + // 1672 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x4d, + 0x19, 0xcf, 0xc6, 0xce, 0xd7, 0x38, 0x69, 0x92, 0x49, 0x5a, 0x8c, 0x0f, 0x76, 0x64, 0x04, 0x4d, + 0xab, 0xb2, 0x6e, 0x43, 0xa9, 0xaa, 0x4a, 0x95, 0xc8, 0x26, 0x81, 0xba, 0x8d, 0xd3, 0x74, 0x1c, + 0x55, 0x55, 0xc5, 0x81, 0xf1, 0xee, 0xc4, 0x99, 0xc6, 0xfb, 0xd1, 0x9d, 0xd9, 0x10, 0x73, 0x82, + 0x0b, 0x67, 0xc4, 0x81, 0xbf, 0x80, 0x7f, 0x01, 0x24, 0xb8, 0x70, 0xa4, 0x12, 0x12, 0xaa, 0xb8, + 0xd0, 0x93, 0x45, 0xcd, 0x9f, 0xf0, 0x4a, 0xef, 0x21, 0x7a, 0x0f, 0xaf, 0x66, 0x76, 0xec, 0xfd, + 0xb2, 0x9b, 0xe4, 0x3d, 0xf8, 0xe6, 0x79, 0x3e, 0x7e, 0xcf, 0x33, 0xf3, 0x7c, 0xae, 0xc1, 0xce, + 0xe9, 0x63, 0xa6, 0x53, 0xb7, 0x76, 0x1a, 0xb4, 0x88, 0xef, 0x10, 0x4e, 0x58, 0xed, 0x8c, 0x38, + 0x96, 0xeb, 0xd7, 0x14, 0x03, 0x7b, 0xb4, 0xc6, 0xb8, 0xeb, 0xe3, 0x36, 0xa9, 0x9d, 0x3d, 0x68, + 0x11, 0x8e, 0x1f, 0xd4, 0xda, 0xc4, 0x21, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0xdf, 0xe5, 0x2e, 0x2c, + 0x85, 0xb2, 0x3a, 0xf6, 0xa8, 0xae, 0x64, 0x75, 0x25, 0x5b, 0xfa, 0x71, 0x9b, 0xf2, 0x93, 0xa0, + 0xa5, 0x9b, 0xae, 0x5d, 0x6b, 0xbb, 0x6d, 0xb7, 0x26, 0x55, 0x5a, 0xc1, 0xb1, 0x3c, 0xc9, 0x83, + 0xfc, 0x15, 0x42, 0x95, 0xaa, 0x31, 0xb3, 0xa6, 0xeb, 0x0b, 0x9b, 0x69, 0x73, 0xa5, 0x87, 0x91, + 0x8c, 0x8d, 0xcd, 0x13, 0xea, 0x10, 0xbf, 0x5b, 0xf3, 0x4e, 0xdb, 0x52, 0xc9, 0x27, 0xcc, 0x0d, + 0x7c, 0x93, 0x5c, 0x4b, 0x8b, 0xd5, 0x6c, 0xc2, 0xf1, 0x28, 0x5b, 0xb5, 0x71, 0x5a, 0x7e, 0xe0, + 0x70, 0x6a, 0x67, 0xcd, 0x3c, 0xba, 0x4c, 0x81, 0x99, 0x27, 0xc4, 0xc6, 0x69, 0xbd, 0xea, 0xdf, + 0x35, 0xb0, 0xb0, 0xd3, 0xac, 0xef, 0xfa, 0xf4, 0x8c, 0xf8, 0xf0, 0x57, 0x60, 0x5e, 0x78, 0x64, + 0x61, 0x8e, 0x8b, 0xda, 0x86, 0xb6, 0x59, 0xd8, 0xba, 0xaf, 0x47, 0x8f, 0x3c, 0x04, 0xd6, 0xbd, + 0xd3, 0xb6, 0x20, 0x30, 0x5d, 0x48, 0xeb, 0x67, 0x0f, 0xf4, 0x97, 0xad, 0x77, 0xc4, 0xe4, 0x0d, + 0xc2, 0xb1, 0x01, 0x3f, 0xf4, 0x2a, 0x53, 0xfd, 0x5e, 0x05, 0x44, 0x34, 0x34, 0x44, 0x85, 0x2f, + 0x40, 0x9e, 0x79, 0xc4, 0x2c, 0x4e, 0x4b, 0xf4, 0x3b, 0xfa, 0xf8, 0x10, 0xea, 0x43, 0xb7, 0x9a, + 0x1e, 0x31, 0x8d, 0x45, 0x05, 0x9b, 0x17, 0x27, 0x24, 0x41, 0xaa, 0x7f, 0xd3, 0xc0, 0xd2, 0x50, + 0x6a, 0x9f, 0x32, 0x0e, 0x7f, 0x99, 0xb9, 0x80, 0x7e, 0xb5, 0x0b, 0x08, 0x6d, 0xe9, 0xfe, 0x8a, + 0xb2, 0x33, 0x3f, 0xa0, 0xc4, 0x9c, 0x7f, 0x0e, 0x66, 0x28, 0x27, 0x36, 0x2b, 0x4e, 0x6f, 0xe4, + 0x36, 0x0b, 0x5b, 0x3f, 0xbc, 0x92, 0xf7, 0xc6, 0x92, 0x42, 0x9c, 0xa9, 0x0b, 0x5d, 0x14, 0x42, + 0x54, 0xff, 0x9b, 0x8f, 0xf9, 0x2e, 0xee, 0x04, 0x9f, 0x80, 0x1b, 0x98, 0x73, 0x6c, 0x9e, 0x20, + 0xf2, 0x3e, 0xa0, 0x3e, 0xb1, 0xe4, 0x0d, 0xe6, 0x0d, 0xd8, 0xef, 0x55, 0x6e, 0x6c, 0x27, 0x38, + 0x28, 0x25, 0x29, 0x74, 0x3d, 0xd7, 0xaa, 0x3b, 0xc7, 0xee, 0x4b, 0xa7, 0xe1, 0x06, 0x0e, 0x97, + 0x0f, 0xac, 0x74, 0x0f, 0x13, 0x1c, 0x94, 0x92, 0x84, 0x26, 0x58, 0x3f, 0x73, 0x3b, 0x81, 0x4d, + 0xf6, 0xe9, 0x31, 0x31, 0xbb, 0x66, 0x87, 0x34, 0x5c, 0x8b, 0xb0, 0x62, 0x6e, 0x23, 0xb7, 0xb9, + 0x60, 0xd4, 0xfa, 0xbd, 0xca, 0xfa, 0xeb, 0x11, 0xfc, 0x8b, 0x5e, 0x65, 0x6d, 0x04, 0x1d, 0x8d, + 0x04, 0x83, 0x4f, 0xc1, 0xb2, 0x7a, 0xa1, 0x1d, 0xec, 0x61, 0x93, 0xf2, 0x6e, 0x31, 0x2f, 0x3d, + 0x5c, 0xeb, 0xf7, 0x2a, 0xcb, 0xcd, 0x24, 0x0b, 0xa5, 0x65, 0xe1, 0x33, 0xb0, 0x74, 0xcc, 0x7e, + 0xe1, 0xbb, 0x81, 0x77, 0xe8, 0x76, 
0xa8, 0xd9, 0x2d, 0xce, 0x6c, 0x68, 0x9b, 0x0b, 0x46, 0xb5, + 0xdf, 0xab, 0x2c, 0xfd, 0xbc, 0x19, 0x63, 0x5c, 0xa4, 0x09, 0x28, 0xa9, 0x08, 0x09, 0x58, 0xe2, + 0xee, 0x29, 0x71, 0xc4, 0xd3, 0x11, 0xc6, 0x59, 0x71, 0x56, 0xc6, 0x72, 0xf3, 0x4b, 0xb1, 0x3c, + 0x8a, 0x29, 0x18, 0x37, 0x55, 0x38, 0x97, 0xe2, 0x54, 0x86, 0x92, 0xa8, 0x70, 0x07, 0xac, 0xfa, + 0x61, 0x70, 0x18, 0x22, 0x5e, 0xd0, 0xea, 0x50, 0x76, 0x52, 0x9c, 0x93, 0x37, 0xbe, 0xd9, 0xef, + 0x55, 0x56, 0x51, 0x9a, 0x89, 0xb2, 0xf2, 0xf0, 0x21, 0x58, 0x64, 0x64, 0x9f, 0x3a, 0xc1, 0x79, + 0x18, 0xd3, 0x79, 0xa9, 0xbf, 0xd2, 0xef, 0x55, 0x16, 0x9b, 0x7b, 0x11, 0x1d, 0x25, 0xa4, 0xaa, + 0x7f, 0xd5, 0xc0, 0xdc, 0x4e, 0xb3, 0x7e, 0xe0, 0x5a, 0x64, 0x02, 0x05, 0x5d, 0x4f, 0x14, 0xf4, + 0xed, 0x4b, 0x4a, 0x42, 0x38, 0x35, 0xb6, 0x9c, 0xbf, 0x0a, 0xcb, 0x59, 0xc8, 0xa8, 0x7e, 0xb4, + 0x01, 0xf2, 0x0e, 0xb6, 0x89, 0x74, 0x7d, 0x21, 0xd2, 0x39, 0xc0, 0x36, 0x41, 0x92, 0x03, 0x7f, + 0x04, 0x66, 0x1d, 0xd7, 0x22, 0xf5, 0x5d, 0xe9, 0xc0, 0x82, 0x71, 0x43, 0xc9, 0xcc, 0x1e, 0x48, + 0x2a, 0x52, 0x5c, 0xf1, 0x94, 0xdc, 0xf5, 0xdc, 0x8e, 0xdb, 0xee, 0xbe, 0x20, 0xdd, 0x41, 0x72, + 0xcb, 0xa7, 0x3c, 0x8a, 0xd1, 0x51, 0x42, 0x0a, 0xb6, 0x40, 0x01, 0x77, 0x3a, 0xae, 0x89, 0x39, + 0x6e, 0x75, 0x88, 0xcc, 0xd8, 0xc2, 0x56, 0xed, 0x4b, 0x77, 0x0c, 0x2b, 0x42, 0x18, 0x47, 0x6a, + 0x22, 0x30, 0x63, 0xb9, 0xdf, 0xab, 0x14, 0xb6, 0x23, 0x1c, 0x14, 0x07, 0xad, 0xfe, 0x45, 0x03, + 0x05, 0x75, 0xeb, 0x09, 0xb4, 0xb0, 0x67, 0xc9, 0x16, 0xf6, 0x83, 0x2b, 0xc4, 0x6b, 0x4c, 0x03, + 0x33, 0x87, 0x6e, 0xcb, 0xee, 0x75, 0x04, 0xe6, 0x2c, 0x19, 0x34, 0x56, 0xd4, 0x24, 0xf4, 0x9d, + 0x2b, 0x40, 0xab, 0x0e, 0xb9, 0xac, 0x0c, 0xcc, 0x85, 0x67, 0x86, 0x06, 0x50, 0xd5, 0xaf, 0x73, + 0x00, 0xee, 0x34, 0xeb, 0xa9, 0xfe, 0x30, 0x81, 0xb4, 0xa6, 0x60, 0x51, 0x64, 0xce, 0x20, 0x37, + 0x54, 0x7a, 0xff, 0xe4, 0x8a, 0x91, 0xc0, 0x2d, 0xd2, 0x69, 0x92, 0x0e, 0x31, 0xb9, 0xeb, 0x87, + 0x49, 0x76, 0x10, 0x03, 0x43, 0x09, 0x68, 0xb8, 0x0b, 0x56, 0x06, 0xed, 0xae, 0x83, 0x19, 0x13, + 0xc9, 0x5d, 0xcc, 0xc9, 0x64, 0x2e, 0x2a, 0x17, 0x57, 0x9a, 0x29, 0x3e, 0xca, 0x68, 0xc0, 0x37, + 0x60, 0xde, 0x8c, 0x77, 0xd6, 0x4b, 0xd2, 0x46, 0x1f, 0x2c, 0x2c, 0xfa, 0xab, 0x00, 0x3b, 0x9c, + 0xf2, 0xae, 0xb1, 0x28, 0x52, 0x66, 0xd8, 0x82, 0x87, 0x68, 0x90, 0x81, 0x55, 0x1b, 0x9f, 0x53, + 0x3b, 0xb0, 0xc3, 0xe4, 0x6e, 0xd2, 0xdf, 0x10, 0xd9, 0x7f, 0xaf, 0x6f, 0x42, 0xb6, 0xbe, 0x46, + 0x1a, 0x0c, 0x65, 0xf1, 0xab, 0xff, 0xd2, 0xc0, 0xad, 0x6c, 0xe0, 0x27, 0x50, 0x20, 0xcd, 0x64, + 0x81, 0xe8, 0x97, 0x64, 0x71, 0xca, 0xc1, 0x31, 0xb5, 0xf2, 0xc7, 0x59, 0xb0, 0x18, 0x8f, 0xe1, + 0x04, 0x12, 0xf8, 0xa7, 0xa0, 0xe0, 0xf9, 0xee, 0x19, 0x65, 0xd4, 0x75, 0x88, 0xaf, 0xba, 0xe3, + 0x9a, 0x52, 0x29, 0x1c, 0x46, 0x2c, 0x14, 0x97, 0x83, 0x1d, 0x00, 0x3c, 0xec, 0x63, 0x9b, 0x70, + 0x51, 0xc9, 0x39, 0xf9, 0x06, 0x8f, 0xbf, 0xf4, 0x06, 0xf1, 0x6b, 0xe9, 0x87, 0x43, 0xd5, 0x3d, + 0x87, 0xfb, 0xdd, 0xc8, 0xc5, 0x88, 0x81, 0x62, 0xf8, 0xf0, 0x14, 0x2c, 0xf9, 0xc4, 0xec, 0x60, + 0x6a, 0xab, 0xb1, 0x9e, 0x97, 0x6e, 0xee, 0x89, 0xf1, 0x8a, 0xe2, 0x8c, 0x8b, 0x5e, 0xe5, 0x7e, + 0x76, 0x45, 0xd7, 0x0f, 0x89, 0xcf, 0x28, 0xe3, 0xc4, 0xe1, 0x61, 0xea, 0x24, 0x74, 0x50, 0x12, + 0x5b, 0x8c, 0x00, 0x5b, 0x0c, 0xc8, 0x97, 0x1e, 0xa7, 0xae, 0xc3, 0x8a, 0x33, 0xd1, 0x08, 0x68, + 0xc4, 0xe8, 0x28, 0x21, 0x05, 0xf7, 0xc1, 0xba, 0xe8, 0xd6, 0xbf, 0x0e, 0x0d, 0xec, 0x9d, 0x7b, + 0xd8, 0x11, 0x4f, 0x55, 0x9c, 0x95, 0xb3, 0xb8, 0x28, 0xb6, 0xa3, 0xed, 0x11, 0x7c, 0x34, 0x52, + 0x0b, 0xbe, 0x01, 0xab, 0xe1, 0x7a, 0x64, 0x50, 0xc7, 0xa2, 
0x4e, 0x5b, 0x2c, 0x47, 0x72, 0x2d, + 0x58, 0x30, 0xee, 0x8a, 0xda, 0x78, 0x9d, 0x66, 0x5e, 0x8c, 0x22, 0xa2, 0x2c, 0x08, 0x7c, 0x0f, + 0x56, 0xa5, 0x45, 0x62, 0xa9, 0xc6, 0x42, 0x09, 0x2b, 0xce, 0x67, 0x77, 0x1b, 0xf1, 0x74, 0x22, + 0x91, 0x06, 0xed, 0x67, 0xd0, 0xa6, 0x8e, 0x88, 0x6f, 0x1b, 0xdf, 0x57, 0xf1, 0x5a, 0xdd, 0x4e, + 0x43, 0xa1, 0x2c, 0x7a, 0xe9, 0x29, 0x58, 0x4e, 0x05, 0x1c, 0xae, 0x80, 0xdc, 0x29, 0xe9, 0x86, + 0xf3, 0x1a, 0x89, 0x9f, 0x70, 0x1d, 0xcc, 0x9c, 0xe1, 0x4e, 0x40, 0xc2, 0x0c, 0x44, 0xe1, 0xe1, + 0xc9, 0xf4, 0x63, 0xad, 0xfa, 0x0f, 0x0d, 0x24, 0x1a, 0xdb, 0x04, 0x8a, 0xbb, 0x91, 0x2c, 0xee, + 0xcd, 0xab, 0x26, 0xf6, 0x98, 0xb2, 0xfe, 0x9d, 0x06, 0x16, 0xe3, 0x5b, 0x20, 0xbc, 0x07, 0xe6, + 0x71, 0x60, 0x51, 0xe2, 0x98, 0x83, 0x9d, 0x65, 0xe8, 0xcd, 0xb6, 0xa2, 0xa3, 0xa1, 0x84, 0xd8, + 0x11, 0xc9, 0xb9, 0x47, 0x7d, 0x2c, 0x32, 0xad, 0x49, 0x4c, 0xd7, 0xb1, 0x98, 0x7c, 0xa6, 0x5c, + 0xd8, 0x28, 0xf7, 0xd2, 0x4c, 0x94, 0x95, 0xaf, 0xfe, 0x79, 0x1a, 0xac, 0x84, 0x09, 0x12, 0x7e, + 0x22, 0xd8, 0xc4, 0xe1, 0x13, 0x68, 0x2f, 0x28, 0xb1, 0xf6, 0xdd, 0xbf, 0x7c, 0x25, 0x8a, 0xbc, + 0x1b, 0xb7, 0xff, 0xc1, 0xb7, 0x60, 0x96, 0x71, 0xcc, 0x03, 0x26, 0xc7, 0x5f, 0x61, 0x6b, 0xeb, + 0x5a, 0xa8, 0x52, 0x33, 0xda, 0xff, 0xc2, 0x33, 0x52, 0x88, 0xd5, 0x7f, 0x6a, 0x60, 0x3d, 0xad, + 0x32, 0x81, 0x84, 0x7b, 0x95, 0x4c, 0xb8, 0x7b, 0xd7, 0xb9, 0xd1, 0x98, 0xa4, 0xfb, 0x8f, 0x06, + 0x6e, 0x65, 0x2e, 0x2f, 0xe7, 0xac, 0xe8, 0x55, 0x5e, 0xaa, 0x23, 0x1e, 0x44, 0xeb, 0xb3, 0xec, + 0x55, 0x87, 0x23, 0xf8, 0x68, 0xa4, 0x16, 0x7c, 0x07, 0x56, 0xa8, 0xd3, 0xa1, 0x0e, 0x51, 0x63, + 0x39, 0x0a, 0xf7, 0xc8, 0x86, 0x92, 0x46, 0x96, 0x61, 0x5e, 0x17, 0xdb, 0x4b, 0x3d, 0x85, 0x82, + 0x32, 0xb8, 0xd5, 0x7f, 0x8f, 0x08, 0x8f, 0x5c, 0x2b, 0x45, 0x45, 0x49, 0x0a, 0xf1, 0x33, 0x15, + 0xa5, 0xe8, 0x68, 0x28, 0x21, 0x33, 0x48, 0x3e, 0x85, 0x72, 0xf4, 0x7a, 0x19, 0x24, 0x35, 0x63, + 0x19, 0x24, 0xcf, 0x48, 0x21, 0x0a, 0x4f, 0xc4, 0xda, 0x16, 0x5b, 0xcf, 0x86, 0x9e, 0x1c, 0x28, + 0x3a, 0x1a, 0x4a, 0x54, 0xbf, 0xc9, 0x8d, 0x88, 0x92, 0x4c, 0xc5, 0xd8, 0x95, 0x06, 0x5f, 0xf8, + 0xe9, 0x2b, 0x59, 0xc3, 0x2b, 0x59, 0xf0, 0x4f, 0x1a, 0x80, 0x78, 0x08, 0xd1, 0x18, 0xa4, 0x6a, + 0x98, 0x4f, 0xcf, 0xaf, 0x5f, 0x21, 0xfa, 0x76, 0x06, 0x2c, 0x9c, 0xd5, 0x25, 0xe5, 0x04, 0xcc, + 0x0a, 0xa0, 0x11, 0x1e, 0x40, 0x0a, 0x0a, 0x21, 0x75, 0xcf, 0xf7, 0x5d, 0x5f, 0x95, 0xec, 0xed, + 0xcb, 0x1d, 0x92, 0xe2, 0x46, 0x59, 0x7e, 0x13, 0x45, 0xfa, 0x17, 0xbd, 0x4a, 0x21, 0xc6, 0x47, + 0x71, 0x6c, 0x61, 0xca, 0x22, 0x91, 0xa9, 0xfc, 0x77, 0x30, 0xb5, 0x4b, 0xc6, 0x9b, 0x8a, 0x61, + 0x97, 0xf6, 0xc0, 0xf7, 0xc6, 0x3c, 0xd0, 0xb5, 0x66, 0xdb, 0xef, 0x35, 0x10, 0xb7, 0x01, 0xf7, + 0x41, 0x9e, 0x53, 0x55, 0x89, 0x85, 0xad, 0xbb, 0x57, 0xeb, 0x30, 0x47, 0xd4, 0x26, 0x51, 0xa3, + 0x14, 0x27, 0x24, 0x51, 0xe0, 0x1d, 0x30, 0x67, 0x13, 0xc6, 0x70, 0x5b, 0x59, 0x8e, 0x3e, 0xa0, + 0x1a, 0x21, 0x19, 0x0d, 0xf8, 0xd5, 0x47, 0x60, 0x6d, 0xc4, 0x27, 0x29, 0xac, 0x80, 0x19, 0x53, + 0xfe, 0xa5, 0x20, 0x1c, 0x9a, 0x31, 0x16, 0x44, 0x97, 0xd9, 0x91, 0xff, 0x25, 0x84, 0x74, 0xe3, + 0x67, 0x1f, 0x3e, 0x97, 0xa7, 0x3e, 0x7e, 0x2e, 0x4f, 0x7d, 0xfa, 0x5c, 0x9e, 0xfa, 0x6d, 0xbf, + 0xac, 0x7d, 0xe8, 0x97, 0xb5, 0x8f, 0xfd, 0xb2, 0xf6, 0xa9, 0x5f, 0xd6, 0xfe, 0xd7, 0x2f, 0x6b, + 0x7f, 0xf8, 0x7f, 0x79, 0xea, 0x6d, 0x69, 0xfc, 0xbf, 0xb5, 0xdf, 0x06, 0x00, 0x00, 0xff, 0xff, + 0xee, 0x44, 0x0b, 0xed, 0xe3, 0x15, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -826,6 +827,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { _ = i var l int _ = l + if m.SELinuxMount != nil { + i-- + if *m.SELinuxMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } if m.RequiresRepublish != nil { i-- if *m.RequiresRepublish { @@ -1795,6 +1806,9 @@ func (m *CSIDriverSpec) Size() (n int) { if m.RequiresRepublish != nil { n += 2 } + if m.SELinuxMount != nil { + n += 2 + } return n } @@ -2148,6 +2162,7 @@ func (this *CSIDriverSpec) String() string { `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, `TokenRequests:` + repeatedStringForTokenRequests + `,`, `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, + `SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`, `}`, }, "") return s @@ -2844,6 +2859,27 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RequiresRepublish = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SELinuxMount = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 943900fa6..2b354dd47 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -49,7 +49,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -82,16 +82,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". 
In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -130,10 +129,9 @@ message CSIDriverSpec { // +optional repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +147,7 @@ message CSIDriverSpec { // +optional optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +157,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +181,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -192,6 +191,28 @@ message CSIDriverSpec { // // +optional optional bool requiresRepublish = 7; + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. 
This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + optional bool seLinuxMount = 8; } // DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. @@ -215,7 +236,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -306,7 +327,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -315,7 +336,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -323,7 +344,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -335,7 +356,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -357,7 +378,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. 
// +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -374,36 +395,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -419,17 +440,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -446,11 +467,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. 
+ // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -464,7 +485,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -473,7 +494,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -489,39 +510,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -530,11 +551,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. 
// +optional @@ -543,7 +564,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index b39414b96..4c39b49cc 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. 
+ // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +88,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +132,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +151,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +179,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +195,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. 
+ // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +223,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +262,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,16 +302,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. 
+ // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -329,14 +331,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -347,11 +349,9 @@ type CSIDriverSpec struct { // +optional VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -367,7 +367,7 @@ type CSIDriverSpec struct { // +optional StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -377,10 +377,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -400,7 +401,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -410,6 +411,28 @@ type CSIDriverSpec struct { // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -445,12 +468,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -518,7 +540,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -554,7 +576,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. 
// The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. @@ -613,6 +635,7 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -625,7 +648,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -634,7 +657,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -642,7 +665,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -654,7 +677,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -677,12 +700,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index d810b4e4c..0f2718b9c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,12 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. 
Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -74,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. 
This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -106,10 +107,10 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -119,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -129,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -145,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -154,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -165,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -176,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -185,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -194,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -205,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -217,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -227,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 5411ed8c0..f0450182b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -127,6 +127,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.SELinuxMount != nil { + in, out := &in.SELinuxMount, &out.SELinuxMount + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 000000000..f02fa8e43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 155648acb..1a9f5e770 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index fab187a6a..57e0e71f6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -87,21 +87,21 @@ func (e *StatusError) DebugError() (string, []interface{}) { // HasStatusCause returns true if the provided error has a details cause // with the provided type name. +// It supports wrapped errors and returns false when the error is nil. 
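The newly vendored equality package above registers per-type comparison functions via conversion.EqualitiesOrDie, so apiequality.Semantic.DeepEqual treats quantities with different string formats (and times in different zones) as equal. A minimal Go sketch of that behaviour, assuming the vendored import paths resolve as in this module; the values chosen are illustrative only:

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "1500m" and "1.5" differ in formatting but not in numeric value,
	// so the registered Quantity comparator reports them as equal.
	a := resource.MustParse("1500m")
	b := resource.MustParse("1.5")
	fmt.Println(apiequality.Semantic.DeepEqual(a, b)) // true
}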
func HasStatusCause(err error, name metav1.CauseType) bool { _, ok := StatusCause(err, name) return ok } // StatusCause returns the named cause from the provided error if it exists and -// the error is of the type APIStatus. Otherwise it returns false. +// the error unwraps to the type APIStatus. Otherwise it returns false. func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { - apierr, ok := err.(APIStatus) - if !ok || apierr == nil || apierr.Status().Details == nil { - return metav1.StatusCause{}, false - } - for _, cause := range apierr.Status().Details.Causes { - if cause.Type == name { - return cause, true + status, ok := err.(APIStatus) + if (ok || errors.As(err, &status)) && status.Status().Details != nil { + for _, cause := range status.Status().Details.Causes { + if cause.Type == name { + return cause, true + } } } return metav1.StatusCause{}, false @@ -757,7 +757,8 @@ func IsRequestEntityTooLargeError(err error) bool { // and may be the result of another HTTP actor. // It supports wrapped errors and returns false when the error is nil. func IsUnexpectedServerError(err error) bool { - if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil { + status, ok := err.(APIStatus) + if (ok || errors.As(err, &status)) && status.Status().Details != nil { for _, cause := range status.Status().Details.Causes { if cause.Type == metav1.CauseTypeUnexpectedServerResponse { return true @@ -770,8 +771,8 @@ func IsUnexpectedServerError(err error) bool { // IsUnexpectedObjectError determines if err is due to an unexpected object from the master. // It supports wrapped errors and returns false when the error is nil. func IsUnexpectedObjectError(err error) bool { - uoe := &UnexpectedObjectError{} - return err != nil && errors.As(err, &uoe) + uoe, ok := err.(*UnexpectedObjectError) + return err != nil && (ok || errors.As(err, &uoe)) } // SuggestsClientDelay returns true if this error suggests a client delay as well as the @@ -780,7 +781,8 @@ func IsUnexpectedObjectError(err error) bool { // request delay without retry. // It supports wrapped errors and returns false when the error is nil. func SuggestsClientDelay(err error) (int, bool) { - if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil { + t, ok := err.(APIStatus) + if (ok || errors.As(err, &t)) && t.Status().Details != nil { switch t.Status().Reason { // this StatusReason explicitly requests the caller to delay the action case metav1.StatusReasonServerTimeout: @@ -798,14 +800,14 @@ func SuggestsClientDelay(err error) (int, bool) { // It supports wrapped errors and returns StatusReasonUnknown when // the error is nil or doesn't have a status. 
func ReasonForError(err error) metav1.StatusReason { - if status := APIStatus(nil); errors.As(err, &status) { + if status, ok := err.(APIStatus); ok || errors.As(err, &status) { return status.Status().Reason } return metav1.StatusReasonUnknown } func reasonAndCodeForError(err error) (metav1.StatusReason, int32) { - if status := APIStatus(nil); errors.As(err, &status) { + if status, ok := err.(APIStatus); ok || errors.As(err, &status) { return status.Status().Reason, status.Status().Code } return metav1.StatusReasonUnknown, 0 diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 00874f89c..60c8209de 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -24,9 +24,9 @@ import ( // SetStatusCondition sets the corresponding condition in conditions to newCondition. // conditions must be non-nil. -// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status) -// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) +// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to +// newCondition, LastTransitionTime is set to now if the new status differs from the old status) +// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { if conditions == nil { return diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go index cbf5d0263..f36aa4ec2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -17,6 +17,7 @@ limitations under the License. 
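The errors.go hunks above swap the old APIStatus(nil) pattern for a direct type assertion with an errors.As fallback, so the helpers keep working when a status error is wrapped with %w. A short sketch of the observable behaviour, using only the vendored apimachinery packages; the wrapping message is invented for illustration:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	base := apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "demo")
	wrapped := fmt.Errorf("reconcile failed: %w", base)

	// Direct and wrapped errors resolve to the same reason, because the helpers
	// try a plain type assertion first and fall back to errors.As.
	fmt.Println(apierrors.ReasonForError(base))    // NotFound
	fmt.Println(apierrors.ReasonForError(wrapped)) // NotFound
	fmt.Println(apierrors.IsNotFound(wrapped))     // true
}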
package meta import ( + "errors" "fmt" "k8s.io/apimachinery/pkg/runtime/schema" @@ -43,6 +44,11 @@ func (e *AmbiguousResourceError) Error() string { return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) } +func (*AmbiguousResourceError) Is(target error) bool { + _, ok := target.(*AmbiguousResourceError) + return ok +} + // AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind type AmbiguousKindError struct { PartialKind schema.GroupVersionKind @@ -63,16 +69,16 @@ func (e *AmbiguousKindError) Error() string { return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) } +func (*AmbiguousKindError) Is(target error) bool { + _, ok := target.(*AmbiguousKindError) + return ok +} + func IsAmbiguousError(err error) bool { if err == nil { return false } - switch err.(type) { - case *AmbiguousResourceError, *AmbiguousKindError: - return true - default: - return false - } + return errors.Is(err, &AmbiguousResourceError{}) || errors.Is(err, &AmbiguousKindError{}) } // NoResourceMatchError is returned if the RESTMapper can't find any match for a resource @@ -84,6 +90,11 @@ func (e *NoResourceMatchError) Error() string { return fmt.Sprintf("no matches for %v", e.PartialResource) } +func (*NoResourceMatchError) Is(target error) bool { + _, ok := target.(*NoResourceMatchError) + return ok +} + // NoKindMatchError is returned if the RESTMapper can't find any match for a kind type NoKindMatchError struct { // GroupKind is the API group and kind that was searched @@ -108,14 +119,14 @@ func (e *NoKindMatchError) Error() string { } } +func (*NoKindMatchError) Is(target error) bool { + _, ok := target.(*NoKindMatchError) + return ok +} + func IsNoMatchError(err error) bool { if err == nil { return false } - switch err.(type) { - case *NoResourceMatchError, *NoKindMatchError: - return true - default: - return false - } + return errors.Is(err, &NoResourceMatchError{}) || errors.Is(err, &NoKindMatchError{}) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 50468b533..1fdd32c4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,7 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: @@ -97,7 +97,7 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { return nil, errExpectFieldItems } switch items.Kind() { - case reflect.Interface, reflect.Ptr: + case reflect.Interface, reflect.Pointer: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { return nil, errExpectSliceItems @@ -112,8 +112,27 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead. 
func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, false) +} + +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error { if unstructured, ok := obj.(runtime.Unstructured); ok { + if allocNew { + return unstructured.EachListItemWithAlloc(fn) + } return unstructured.EachListItem(fn) } // TODO: Change to an interface call? @@ -130,7 +149,7 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { return nil } takeAddr := false - if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { + if elemType := items.Type().Elem(); elemType.Kind() != reflect.Pointer && elemType.Kind() != reflect.Interface { if !items.Index(0).CanAddr() { return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) } @@ -140,8 +159,19 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { for i := 0; i < len; i++ { raw := items.Index(i) if takeAddr { - raw = raw.Addr() + if allocNew { + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + // reflect.New will guarantee that itemCopy must be a pointer. + raw = itemCopy + } else { + raw = raw.Addr() + } } + // raw must be a pointer or an interface + // allocate a pointer is cheap switch item := raw.Interface().(type) { case *runtime.RawExtension: if err := fn(item.Object); err != nil { @@ -166,7 +196,23 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { // ExtractList returns obj's Items element as an array of runtime.Objects. // Returns an error if obj is not a List type (does not have an Items member). +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead. func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, false) +} + +// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency. 
+func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { itemsPtr, err := GetItemsPtr(obj) if err != nil { return nil, err @@ -176,10 +222,17 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return nil, err } list := make([]runtime.Object, items.Len()) + if len(list) == 0 { + return list, nil + } + elemType := items.Type().Elem() + isRawExtension := elemType == rawExtensionObjectType + implementsObject := elemType.Implements(objectType) for i := range list { raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: + switch { + case isRawExtension: + item := raw.Interface().(runtime.RawExtension) switch { case item.Object != nil: list[i] = item.Object @@ -189,8 +242,18 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { default: list[i] = nil } - case runtime.Object: - list[i] = item + case implementsObject: + list[i] = raw.Interface().(runtime.Object) + case allocNew: + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + var ok bool + // reflect.New will guarantee that itemCopy must be a pointer. + if list[i], ok = itemCopy.Interface().(runtime.Object); !ok { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } default: var found bool if list[i], found = raw.Addr().Interface().(runtime.Object); !found { @@ -201,8 +264,12 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return list, nil } -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) +var ( + // objectSliceType is the type of a slice of Objects + objectSliceType = reflect.TypeOf([]runtime.Object{}) + objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem() + rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{}) +) // LenList returns the length of this list or 0 if it is not a list. 
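ExtractListWithAlloc and EachListItemWithAlloc, added above, shallow-copy non-pointer items so that holding onto one extracted object does not keep the entire Items slice alive. A minimal sketch of the ExtractListWithAlloc path, assuming k8s.io/api/core/v1 from the same dependency bump is available; the ConfigMap objects are placeholders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	list := &corev1.ConfigMapList{
		Items: []corev1.ConfigMap{
			{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
			{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
		},
	}

	// Each returned object is a pointer to a shallow copy of the list item,
	// not a pointer into list.Items.
	objs, err := meta.ExtractListWithAlloc(list)
	if err != nil {
		panic(err)
	}
	for _, obj := range objs {
		fmt.Println(obj.(*corev1.ConfigMap).Name)
	}
}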
func LenList(list runtime.Object) int { @@ -237,7 +304,7 @@ func SetList(list runtime.Object, objects []runtime.Object) error { slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) for i := range objects { dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + if dest.Type() == rawExtensionObjectType { dest = dest.FieldByName("Object") } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 81403393f..2551f07f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -130,7 +130,6 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { Annotations: m.GetAnnotations(), OwnerReferences: m.GetOwnerReferences(), Finalizers: m.GetFinalizers(), - ZZZ_DeprecatedClusterName: m.GetZZZ_DeprecatedClusterName(), ManagedFields: m.GetManagedFields(), }, } @@ -600,7 +599,7 @@ func (a genericAccessor) SetFinalizers(finalizers []string) { func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { klog.Errorf("expect %v to be a pointer to slice", s) return ret } @@ -618,7 +617,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index d1c9f5307..063fd285d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - derekwaynecarr diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 79abc0ff5..ddd0db8fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -30,8 +30,11 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource"; // // The serialization format is: // +// ``` // ::= -// (Note that may be empty, from the "" case in .) +// +// (Note that may be empty, from the "" case in .) +// // ::= 0 | 1 | ... | 9 // ::= | // ::= | . | . | . @@ -39,10 +42,15 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource"; // ::= | // ::= | | // ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// // ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) 
+// // ::= "e" | "E" +// ``` // // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal @@ -56,14 +64,17 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource"; // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. +// +// - No precision is lost +// - No fractional digits will be emitted +// - The exponent (or suffix) is as large as possible. +// // The sign will be omitted unless the number is negative. // // Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" +// +// - 1.5 will be serialized as "1500m" +// - 1.5Gi will be serialized as "1536Mi" // // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 1ca31d8ca..b47d554b3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -34,8 +34,11 @@ import ( // // The serialization format is: // +// ``` // ::= -// (Note that may be empty, from the "" case in .) +// +// (Note that may be empty, from the "" case in .) +// // ::= 0 | 1 | ... | 9 // ::= | // ::= | . | . | . @@ -43,10 +46,15 @@ import ( // ::= | // ::= | | // ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// // ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// // ::= "e" | "E" +// ``` // // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal @@ -60,14 +68,17 @@ import ( // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. +// +// - No precision is lost +// - No fractional digits will be emitted +// - The exponent (or suffix) is as large as possible. +// // The sign will be omitted unless the number is negative. // // Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" +// +// - 1.5 will be serialized as "1500m" +// - 1.5Gi will be serialized as "1536Mi" // // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. @@ -404,10 +415,10 @@ func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "numb // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // // Note about BinarySI: -// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between -// -1 and +1, it will be emitted as if q.Format were DecimalSI. 
-// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be -// rounded up. (1.1i becomes 2i.) +// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { if q.IsZero() { return zeroBytes, nil @@ -643,7 +654,7 @@ func (q Quantity) MarshalJSON() ([]byte, error) { copy(out[1:], q.s) return out, nil } - result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result := make([]byte, int64QuantityExpectedBytes) result[0] = '"' number, suffix := q.CanonicalizeBytes(result[1:1]) // if the same slice was returned to us that we passed in, avoid another allocation by copying number into diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go index 5ed7abe66..6ec527f9c 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -165,7 +165,7 @@ func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s if exponent == 0 { return nil, true } - result := make([]byte, 8, 8) + result := make([]byte, 8) result[0] = 'e' number := strconv.AppendInt(result[1:1], int64(exponent), 10) if &result[1] == &number[0] { diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go new file mode 100644 index 000000000..9f20152e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation contains generic api type validation functions. +package validation // import "k8s.io/apimachinery/pkg/api/validation" diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go new file mode 100644 index 000000000..e0b5b1490 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// IsNegativeErrorMsg is a error message for value must be greater than or equal to 0. +const IsNegativeErrorMsg string = `must be greater than or equal to 0` + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string + +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Subdomain(name) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. +func NameIsDNSLabel(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Label(name) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. +func NameIsDNS1035Label(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1035Label(name) +} + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = NameIsDNSLabel + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = NameIsDNSSubdomain + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if it is a dash and if the length of this string is greater than 1. Note that +// this is used when a value could be appended to the string, see ValidateNameFunc +// for more info. +func maskTrailingDash(name string) string { + if len(name) > 1 && strings.HasSuffix(name, "-") { + return name[:len(name)-2] + "a" + } + return name +} + +// ValidateNonnegativeField validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg)) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go new file mode 100644 index 000000000..593d7ba8c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -0,0 +1,265 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// FieldImmutableErrorMsg is a error message for field is immutable. +const FieldImmutableErrorMsg string = `field is immutable` + +const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = map[schema.GroupVersionKind]struct{}{ + {Group: "", Version: "v1", Kind: "Event"}: {}, +} + +// ValidateAnnotations validates that a set of annotations are correctly defined. +func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k := range annotations { + // The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking. + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + } + if err := ValidateAnnotationsSize(annotations); err != nil { + allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB)) + } + return allErrs +} + +func ValidateAnnotationsSize(annotations map[string]string) error { + var totalSize int64 + for k, v := range annotations { + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(TotalAnnotationSizeLimitB) { + return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB) + } + return nil +} + +func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +// ValidateOwnerReferences validates that a set of owner references are correctly defined. +func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + firstControllerName := "" + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) 
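(Editor's aside, not part of the patch: a small sketch of calling the ValidateAnnotations helper added above. The import paths are the ones the vendored file itself uses; the annotation keys are made-up examples, and the vendored objectmeta.go continues right after this note.)

package main

import (
	"fmt"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	annotations := map[string]string{
		"example.com/owner": "team-a",          // valid qualified-name key
		"Bad Key!":          "will be rejected", // spaces and '!' fail IsQualifiedName
	}
	errs := apivalidation.ValidateAnnotations(annotations, field.NewPath("metadata", "annotations"))
	for _, e := range errs {
		fmt.Println(e.Error())
	}
}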
+ if ref.Controller != nil && *ref.Controller { + curControllerName := ref.Kind + "/" + ref.Name + if firstControllerName != "" { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, + fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", firstControllerName, curControllerName))) + } else { + firstControllerName = curControllerName + } + } + } + return allErrs +} + +// ValidateFinalizerName validates finalizer names. +func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + + return allErrs +} + +// ValidateNoNewFinalizers validates the new finalizers has no new finalizers compare to old finalizers. +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + +// ValidateImmutableField validates the new value and the old value are deeply equal. +func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !apiequality.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + metadata, err := meta.Accessor(objMeta) + if err != nil { + var allErrs field.ErrorList + allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath) +} + +// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(meta.GetGenerateName()) != 0 { + for _, msg := range nameFn(meta.GetGenerateName(), true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg)) + } + } + // If the generated name validates, but the calculated value does not, it's a problem with generation, and we + // report it here. This may confuse users, but indicates a programming bug and still must be validated. 
+ // If there are multiple fields out of which one is required then add an or as a separator + if len(meta.GetName()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) + } else { + for _, msg := range nameFn(meta.GetName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg)) + } + } + if requiresNamespace { + if len(meta.GetNamespace()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } else { + for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg)) + } + } + } else { + if len(meta.GetNamespace()) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) + } + } + + allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...) + allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...) + return allErrs +} + +// ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers. +func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + hasFinalizerOrphanDependents := false + hasFinalizerDeleteDependents := false + for _, finalizer := range finalizers { + allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...) + if finalizer == metav1.FinalizerOrphanDependents { + hasFinalizerOrphanDependents = true + } + if finalizer == metav1.FinalizerDeleteDependents { + hasFinalizerDeleteDependents = true + } + } + if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents { + allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents))) + } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated. +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + newMetadata, err := meta.Accessor(newMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error())) + return allErrs + } + oldMetadata, err := meta.Accessor(oldMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath) +} + +// ValidateObjectMetaAccessorUpdate validates an object's metadata when updated. +func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // Finalizers cannot be added if the object is already being deleted. 
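(Editor's aside, not part of the patch: a hedged sketch of create-time metadata validation with this package, combining ValidateObjectMeta from objectmeta.go with the NameIsDNSSubdomain ValidateNameFunc from generic.go above. The vendored function body continues immediately after this note.)

package main

import (
	"fmt"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Underscores and capitals are not a DNS-1123 subdomain, so this fails name validation.
	meta := &metav1.ObjectMeta{Name: "My_Invalid_Name", Namespace: "demo"}
	errs := apivalidation.ValidateObjectMeta(meta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
	fmt.Println(len(errs) > 0) // true
}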
+ if oldMeta.GetDeletionTimestamp() != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...) + } + + // Reject updates that don't specify a resource version + if len(newMeta.GetResourceVersion()) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update")) + } + + // Generation shouldn't be decremented + if newMeta.GetGeneration() < oldMeta.GetGeneration() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented")) + } + + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...) + + allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go new file mode 100644 index 000000000..29c6a48b6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable. +// +// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc). +// TODO(#115478): when the function is registered in the scheme remove all callers of this method. 
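(Editor's aside, not part of the patch: the defaulting helper whose body follows below can be exercised roughly as sketched here; the second argument mirrors the watch-list feature gate mentioned in the TODO above, and clients of the public API set the equivalent SendInitialEvents and ResourceVersionMatch fields on metav1.ListOptions themselves.)

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
)

func main() {
	// A legacy watch request: watch=true with an unset resourceVersion.
	opts := internalversion.ListOptions{Watch: true, ResourceVersion: ""}

	// With the watch-list feature enabled, defaults are filled in;
	// with it disabled, the call is a no-op.
	internalversion.SetListOptionsDefaults(&opts, true)

	fmt.Println(opts.SendInitialEvents != nil && *opts.SendInitialEvents) // true
	fmt.Println(opts.ResourceVersionMatch)                                // NotOlderThan
}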
+func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) { + if !isWatchListFeatureEnabled { + return + } + if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 { + return + } + legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0" + if obj.Watch && legacy { + turnOnInitialEvents := true + obj.SendInitialEvents = &turnOnInitialEvents + obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index a49b5f2be..00d2b8c68 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -66,6 +66,31 @@ type ListOptions struct { // it does not recognize and will return a 410 error if the token can no longer be used because // it has expired. Continue string + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. 
+ SendInitialEvents *bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 6d212b846..a6552c276 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -115,6 +115,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } @@ -137,6 +138,7 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 6e1eac5c7..af66a2ac4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 6e5f5e61b..1a641e7c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1326,187 +1326,187 @@ func init() { } var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2879 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcd, 0x6f, 0x24, 0x47, - 0xbd, 0xee, 0x19, 0x8f, 0x3d, 0xf3, 0x1b, 0x8f, 0x3f, 0x6a, 0xbd, 0xef, 0xcd, 0xfa, 0xe9, 0x79, - 0x9c, 0x4e, 0x14, 0x6d, 0xde, 0x4b, 0xc6, 0xd9, 0x25, 0x44, 0x9b, 0x0d, 0x09, 0x78, 0x3c, 0xeb, - 0x8d, 0x93, 0x75, 0x6c, 0x95, 0x77, 0x17, 0x58, 0x22, 0x48, 0xbb, 0xbb, 0x3c, 0x6e, 0xdc, 0xd3, - 0x3d, 0xa9, 0xea, 0xf1, 0x66, 0xe0, 0x40, 0x0e, 0x20, 0x82, 0x84, 0xa2, 0x70, 0xe3, 0x84, 0x12, - 0xc1, 0x1f, 0x80, 0xb8, 0xc0, 0x1f, 0x80, 0x44, 0x8e, 0x41, 0x5c, 0x22, 0x81, 0x46, 0x89, 0x39, - 0x70, 0x44, 0x5c, 0x7d, 0x01, 0xd5, 0x47, 0x77, 0x57, 0xcf, 0xc7, 0xba, 0x27, 0xbb, 0x44, 0xdc, - 0xa6, 0x7f, 0xdf, 0x55, 0xf5, 0xab, 0xdf, 0x57, 0x0d, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0x37, 0x58, - 0x3f, 0xee, 0x1e, 0x10, 0xea, 0x93, 0x90, 0xb0, 0xf5, 0x13, 0xe2, 0x3b, 0x01, 0x5d, 0x57, 0x08, - 0xab, 0xe3, 0xb6, 0x2d, 0xfb, 0xc8, 0xf5, 0x09, 0xed, 0xad, 0x77, 0x8e, 0x5b, 0x1c, 0xc0, 0xd6, - 0xdb, 0x24, 0xb4, 0xd6, 0x4f, 0xae, 0xac, 0xb7, 0x88, 0x4f, 0xa8, 0x15, 0x12, 0xa7, 0xde, 0xa1, - 0x41, 0x18, 0xa0, 0x27, 0x24, 0x57, 0x5d, 0xe7, 0xaa, 0x77, 0x8e, 0x5b, 0x1c, 0xc0, 0xea, 0x9c, - 0xab, 0x7e, 0x72, 0x65, 0xe5, 0x99, 0x96, 0x1b, 0x1e, 0x75, 0x0f, 0xea, 0x76, 0xd0, 0x5e, 0x6f, - 0x05, 0xad, 0x60, 0x5d, 0x30, 0x1f, 0x74, 0x0f, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, - 0xb1, 0xa6, 
0xd0, 0xae, 0x1f, 0xba, 0x6d, 0x32, 0x68, 0xc5, 0xca, 0xf3, 0xe7, 0x31, 0x30, 0xfb, - 0x88, 0xb4, 0xad, 0x41, 0x3e, 0xf3, 0x0f, 0x79, 0x28, 0x6e, 0xec, 0x6d, 0xdf, 0xa4, 0x41, 0xb7, - 0x83, 0xd6, 0x60, 0xda, 0xb7, 0xda, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x4b, 0x8d, 0xb9, 0x8f, 0xfa, - 0xb5, 0xa9, 0xd3, 0x7e, 0x6d, 0xfa, 0x75, 0xab, 0x4d, 0xb0, 0xc0, 0x20, 0x0f, 0x8a, 0x27, 0x84, - 0x32, 0x37, 0xf0, 0x59, 0x35, 0xb7, 0x96, 0xbf, 0x5c, 0xbe, 0xfa, 0x72, 0x3d, 0xcb, 0xfa, 0xeb, - 0x42, 0xc1, 0x5d, 0xc9, 0xba, 0x15, 0xd0, 0xa6, 0xcb, 0xec, 0xe0, 0x84, 0xd0, 0x5e, 0x63, 0x51, - 0x69, 0x29, 0x2a, 0x24, 0xc3, 0xb1, 0x06, 0xf4, 0x43, 0x03, 0x16, 0x3b, 0x94, 0x1c, 0x12, 0x4a, - 0x89, 0xa3, 0xf0, 0xd5, 0xfc, 0x9a, 0xf1, 0x08, 0xd4, 0x56, 0x95, 0xda, 0xc5, 0xbd, 0x01, 0xf9, - 0x78, 0x48, 0x23, 0xfa, 0xa5, 0x01, 0x2b, 0x8c, 0xd0, 0x13, 0x42, 0x37, 0x1c, 0x87, 0x12, 0xc6, - 0x1a, 0xbd, 0x4d, 0xcf, 0x25, 0x7e, 0xb8, 0xb9, 0xdd, 0xc4, 0xac, 0x3a, 0x2d, 0xf6, 0xe1, 0xab, - 0xd9, 0x0c, 0xda, 0x1f, 0x27, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0xc6, 0x92, 0x30, 0xfc, 0x00, 0x33, - 0xcc, 0x43, 0x98, 0x8b, 0x0e, 0xf2, 0x96, 0xcb, 0x42, 0x74, 0x17, 0x66, 0x5a, 0xfc, 0x83, 0x55, - 0x0d, 0x61, 0x60, 0x3d, 0x9b, 0x81, 0x91, 0x8c, 0xc6, 0xbc, 0xb2, 0x67, 0x46, 0x7c, 0x32, 0xac, - 0xa4, 0x99, 0x3f, 0x99, 0x86, 0xf2, 0xc6, 0xde, 0x36, 0x26, 0x2c, 0xe8, 0x52, 0x9b, 0x64, 0x70, - 0x9a, 0x6b, 0x30, 0xc7, 0x5c, 0xbf, 0xd5, 0xf5, 0x2c, 0xca, 0xa1, 0xd5, 0x19, 0x41, 0xb9, 0xac, - 0x28, 0xe7, 0xf6, 0x35, 0x1c, 0x4e, 0x51, 0xa2, 0xab, 0x00, 0x5c, 0x02, 0xeb, 0x58, 0x36, 0x71, - 0xaa, 0xb9, 0x35, 0xe3, 0x72, 0xb1, 0x81, 0x14, 0x1f, 0xbc, 0x1e, 0x63, 0xb0, 0x46, 0x85, 0x1e, - 0x87, 0x82, 0xb0, 0xb4, 0x5a, 0x14, 0x6a, 0x2a, 0x8a, 0xbc, 0x20, 0x96, 0x81, 0x25, 0x0e, 0x3d, - 0x05, 0xb3, 0xca, 0xcb, 0xaa, 0x25, 0x41, 0xb6, 0xa0, 0xc8, 0x66, 0x23, 0x37, 0x88, 0xf0, 0x7c, - 0x7d, 0xc7, 0xae, 0xef, 0x08, 0xbf, 0xd3, 0xd6, 0xf7, 0x9a, 0xeb, 0x3b, 0x58, 0x60, 0xd0, 0x2d, - 0x28, 0x9c, 0x10, 0x7a, 0xc0, 0x3d, 0x81, 0xbb, 0xe6, 0xff, 0x67, 0xdb, 0xe8, 0xbb, 0x9c, 0xa5, - 0x51, 0xe2, 0xa6, 0x89, 0x9f, 0x58, 0x0a, 0x41, 0x75, 0x00, 0x76, 0x14, 0xd0, 0x50, 0x2c, 0xaf, - 0x5a, 0x58, 0xcb, 0x5f, 0x2e, 0x35, 0xe6, 0xf9, 0x7a, 0xf7, 0x63, 0x28, 0xd6, 0x28, 0x38, 0xbd, - 0x6d, 0x85, 0xa4, 0x15, 0x50, 0x97, 0xb0, 0xea, 0x6c, 0x42, 0xbf, 0x19, 0x43, 0xb1, 0x46, 0x81, - 0x5e, 0x05, 0xc4, 0xc2, 0x80, 0x5a, 0x2d, 0xa2, 0x96, 0xfa, 0x8a, 0xc5, 0x8e, 0xaa, 0x20, 0x56, - 0xb7, 0xa2, 0x56, 0x87, 0xf6, 0x87, 0x28, 0xf0, 0x08, 0x2e, 0xf3, 0x37, 0x06, 0x2c, 0x68, 0xbe, - 0x20, 0xfc, 0xee, 0x1a, 0xcc, 0xb5, 0xb4, 0x5b, 0xa7, 0xfc, 0x22, 0x3e, 0x6d, 0xfd, 0x46, 0xe2, - 0x14, 0x25, 0x22, 0x50, 0xa2, 0x4a, 0x52, 0x14, 0x5d, 0xae, 0x64, 0x76, 0xda, 0xc8, 0x86, 0x44, - 0x93, 0x06, 0x64, 0x38, 0x91, 0x6c, 0xfe, 0xcd, 0x10, 0x0e, 0x1c, 0xc5, 0x1b, 0x74, 0x59, 0x8b, - 0x69, 0x86, 0xd8, 0xbe, 0xb9, 0x31, 0xf1, 0xe8, 0x9c, 0x40, 0x90, 0xfb, 0x8f, 0x08, 0x04, 0xd7, - 0x8b, 0x3f, 0xff, 0xa0, 0x36, 0xf5, 0xce, 0x5f, 0xd6, 0xa6, 0xcc, 0x9f, 0x19, 0x30, 0xb7, 0xd1, - 0xe9, 0x78, 0xbd, 0xdd, 0x4e, 0x28, 0x16, 0x60, 0xc2, 0x8c, 0x43, 0x7b, 0xb8, 0xeb, 0xab, 0x85, - 0x02, 0xbf, 0xdf, 0x4d, 0x01, 0xc1, 0x0a, 0xc3, 0xef, 0xcf, 0x61, 0x40, 0x6d, 0xa2, 0xae, 0x5b, - 0x7c, 0x7f, 0xb6, 0x38, 0x10, 0x4b, 0x1c, 0x3f, 0xe4, 0x43, 0x97, 0x78, 0xce, 0x8e, 0xe5, 0x5b, - 0x2d, 0x42, 0xd5, 0xe5, 0x88, 0xb7, 0x7e, 0x4b, 0xc3, 0xe1, 0x14, 0xa5, 0xf9, 0xcf, 0x1c, 0x94, - 0x36, 0x03, 0xdf, 0x71, 0x43, 0x75, 0xb9, 0xc2, 0x5e, 0x67, 0x28, 0x78, 0xdc, 0xee, 0x75, 0x08, - 0x16, 0x18, 0xf4, 0x02, 0xcc, 0xb0, 
0xd0, 0x0a, 0xbb, 0x4c, 0xd8, 0x53, 0x6a, 0x3c, 0x16, 0x85, - 0xa5, 0x7d, 0x01, 0x3d, 0xeb, 0xd7, 0x16, 0x62, 0x71, 0x12, 0x84, 0x15, 0x03, 0xf7, 0xf4, 0xe0, - 0x40, 0x6c, 0x94, 0x73, 0x53, 0xa6, 0xbd, 0x28, 0x7f, 0xe4, 0x13, 0x4f, 0xdf, 0x1d, 0xa2, 0xc0, - 0x23, 0xb8, 0xd0, 0x09, 0x20, 0xcf, 0x62, 0xe1, 0x6d, 0x6a, 0xf9, 0x4c, 0xe8, 0xba, 0xed, 0xb6, - 0x89, 0xba, 0xf0, 0xff, 0x97, 0xed, 0xc4, 0x39, 0x47, 0xa2, 0xf7, 0xd6, 0x90, 0x34, 0x3c, 0x42, - 0x03, 0x7a, 0x12, 0x66, 0x28, 0xb1, 0x58, 0xe0, 0x57, 0x0b, 0x62, 0xf9, 0x71, 0x54, 0xc6, 0x02, - 0x8a, 0x15, 0x96, 0x07, 0xb4, 0x36, 0x61, 0xcc, 0x6a, 0x45, 0xe1, 0x35, 0x0e, 0x68, 0x3b, 0x12, - 0x8c, 0x23, 0xbc, 0xf9, 0x6b, 0x03, 0x2a, 0x9b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2d, 0x3e, 0xf7, - 0x89, 0xa3, 0x0d, 0x58, 0x10, 0xdf, 0x77, 0x2d, 0xcf, 0x75, 0xe4, 0x19, 0x4c, 0x0b, 0xe6, 0xff, - 0x56, 0xcc, 0x0b, 0x5b, 0x69, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9c, 0x87, 0x4a, 0x93, 0x78, 0x24, - 0x31, 0x79, 0x0b, 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x23, 0xd4, 0x0d, 0x9c, 0x7d, 0x62, 0x07, 0xbe, - 0xc3, 0x84, 0x1b, 0xe5, 0x1b, 0xff, 0xc5, 0xf7, 0xf7, 0xe6, 0x10, 0x16, 0x8f, 0xe0, 0x40, 0x1e, - 0x54, 0x3a, 0x54, 0xfc, 0x16, 0x7b, 0x2e, 0xbd, 0xac, 0x7c, 0xf5, 0x4b, 0xd9, 0x8e, 0x74, 0x4f, - 0x67, 0x6d, 0x2c, 0x9d, 0xf6, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf, 0xc1, 0x62, 0x40, - 0x3b, 0x47, 0x96, 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0xc8, 0x62, 0x63, 0x99, - 0xd7, 0x22, 0xbb, 0x03, 0x38, 0x3c, 0x44, 0x8d, 0xee, 0xc1, 0x52, 0x87, 0x06, 0x1d, 0xab, 0x25, - 0x36, 0x66, 0x2f, 0xf0, 0x5c, 0xbb, 0xa7, 0xb6, 0xf3, 0xe9, 0xd3, 0x7e, 0x6d, 0x69, 0x6f, 0x10, - 0x79, 0xd6, 0xaf, 0x5d, 0x10, 0x5b, 0xc7, 0x21, 0x09, 0x12, 0x0f, 0x8b, 0xd1, 0xdc, 0xa0, 0x30, - 0xce, 0x0d, 0xcc, 0x6d, 0x28, 0x36, 0xbb, 0xea, 0x4e, 0xbc, 0x04, 0x45, 0x47, 0xfd, 0x56, 0x3b, - 0x1f, 0x5d, 0xce, 0x98, 0xe6, 0xac, 0x5f, 0xab, 0xf0, 0xf2, 0xb3, 0x1e, 0x01, 0x70, 0xcc, 0x62, - 0x3e, 0x09, 0x45, 0x71, 0xf0, 0xec, 0xee, 0x15, 0xb4, 0x08, 0x79, 0x6c, 0xdd, 0x17, 0x52, 0xe6, - 0x30, 0xff, 0xa9, 0x45, 0xb1, 0x5d, 0x80, 0x9b, 0x24, 0x8c, 0x0e, 0x7e, 0x03, 0x16, 0xa2, 0x50, - 0x9e, 0xce, 0x30, 0xb1, 0x37, 0xe1, 0x34, 0x1a, 0x0f, 0xd2, 0x9b, 0x6f, 0x40, 0x49, 0x64, 0x21, - 0x9e, 0xc2, 0x93, 0x72, 0xc1, 0x78, 0x40, 0xb9, 0x10, 0xd5, 0x00, 0xb9, 0x71, 0x35, 0x80, 0x66, - 0xae, 0x07, 0x15, 0xc9, 0x1b, 0x15, 0x48, 0x99, 0x34, 0x3c, 0x0d, 0xc5, 0xc8, 0x4c, 0xa5, 0x25, - 0x2e, 0x8c, 0x23, 0x41, 0x38, 0xa6, 0xd0, 0xb4, 0x1d, 0x41, 0x2a, 0xa3, 0x66, 0x53, 0xa6, 0x55, - 0x3f, 0xb9, 0x07, 0x57, 0x3f, 0x9a, 0xa6, 0x1f, 0x40, 0x75, 0x5c, 0x35, 0xfd, 0x10, 0x39, 0x3f, - 0xbb, 0x29, 0xe6, 0x7b, 0x06, 0x2c, 0xea, 0x92, 0xb2, 0x1f, 0x5f, 0x76, 0x25, 0xe7, 0x57, 0x7b, - 0xda, 0x8e, 0xfc, 0xc2, 0x80, 0xe5, 0xd4, 0xd2, 0x26, 0x3a, 0xf1, 0x09, 0x8c, 0xd2, 0x9d, 0x23, - 0x3f, 0x81, 0x73, 0xfc, 0x29, 0x07, 0x95, 0x5b, 0xd6, 0x01, 0xf1, 0xf6, 0x89, 0x47, 0xec, 0x30, - 0xa0, 0xe8, 0xfb, 0x50, 0x6e, 0x5b, 0xa1, 0x7d, 0x24, 0xa0, 0x51, 0x67, 0xd0, 0xcc, 0x16, 0xec, - 0x52, 0x92, 0xea, 0x3b, 0x89, 0x98, 0x1b, 0x7e, 0x48, 0x7b, 0x8d, 0x0b, 0xca, 0xa4, 0xb2, 0x86, - 0xc1, 0xba, 0x36, 0xd1, 0xce, 0x89, 0xef, 0x1b, 0x6f, 0x77, 0x78, 0xd9, 0x32, 0x79, 0x17, 0x99, - 0x32, 0x01, 0x93, 0xb7, 0xba, 0x2e, 0x25, 0x6d, 0xe2, 0x87, 0x49, 0x3b, 0xb7, 0x33, 0x20, 0x1f, - 0x0f, 0x69, 0x5c, 0x79, 0x19, 0x16, 0x07, 0x8d, 0xe7, 0xf1, 0xe7, 0x98, 0xf4, 0xe4, 0x79, 0x61, - 0xfe, 0x13, 0x2d, 0x43, 0xe1, 0xc4, 0xf2, 0xba, 0xea, 0x36, 0x62, 0xf9, 0x71, 0x3d, 0x77, 0xcd, - 0x30, 0x7f, 0x65, 0x40, 0x75, 0x9c, 0x21, 0xe8, 0x7f, 0x35, 
0x41, 0x8d, 0xb2, 0xb2, 0x2a, 0xff, - 0x1a, 0xe9, 0x49, 0xa9, 0x37, 0xa0, 0x18, 0x74, 0x78, 0x4d, 0x11, 0x50, 0x75, 0xea, 0x4f, 0x45, - 0x27, 0xb9, 0xab, 0xe0, 0x67, 0xfd, 0xda, 0xc5, 0x94, 0xf8, 0x08, 0x81, 0x63, 0x56, 0x1e, 0xa9, - 0x85, 0x3d, 0x3c, 0x7b, 0xc4, 0x91, 0xfa, 0xae, 0x80, 0x60, 0x85, 0x31, 0x7f, 0x67, 0xc0, 0xb4, - 0x28, 0xc8, 0xdf, 0x80, 0x22, 0xdf, 0x3f, 0xc7, 0x0a, 0x2d, 0x61, 0x57, 0xe6, 0x56, 0x90, 0x73, - 0xef, 0x90, 0xd0, 0x4a, 0xbc, 0x2d, 0x82, 0xe0, 0x58, 0x22, 0xc2, 0x50, 0x70, 0x43, 0xd2, 0x8e, - 0x0e, 0xf2, 0x99, 0xb1, 0xa2, 0xd5, 0x20, 0xa2, 0x8e, 0xad, 0xfb, 0x37, 0xde, 0x0e, 0x89, 0xcf, - 0x0f, 0x23, 0xb9, 0x1a, 0xdb, 0x5c, 0x06, 0x96, 0xa2, 0xcc, 0x7f, 0x18, 0x10, 0xab, 0xe2, 0xce, - 0xcf, 0x88, 0x77, 0x78, 0xcb, 0xf5, 0x8f, 0xd5, 0xb6, 0xc6, 0xe6, 0xec, 0x2b, 0x38, 0x8e, 0x29, - 0x46, 0xa5, 0x87, 0xdc, 0x64, 0xe9, 0x81, 0x2b, 0xb4, 0x03, 0x3f, 0x74, 0xfd, 0xee, 0xd0, 0x6d, - 0xdb, 0x54, 0x70, 0x1c, 0x53, 0xf0, 0x42, 0x84, 0x92, 0xb6, 0xe5, 0xfa, 0xae, 0xdf, 0xe2, 0x8b, - 0xd8, 0x0c, 0xba, 0x7e, 0x28, 0x32, 0xb2, 0x2a, 0x44, 0xf0, 0x10, 0x16, 0x8f, 0xe0, 0x30, 0x7f, - 0x3b, 0x0d, 0x65, 0xbe, 0xe6, 0x28, 0xcf, 0xbd, 0x08, 0x15, 0x4f, 0xf7, 0x02, 0xb5, 0xf6, 0x8b, - 0xca, 0x94, 0xf4, 0xbd, 0xc6, 0x69, 0x5a, 0xce, 0x2c, 0x4a, 0xa8, 0x98, 0x39, 0x97, 0x66, 0xde, - 0xd2, 0x91, 0x38, 0x4d, 0xcb, 0xa3, 0xd7, 0x7d, 0x7e, 0x3f, 0x54, 0x65, 0x12, 0x1f, 0xd1, 0xd7, - 0x39, 0x10, 0x4b, 0x1c, 0xda, 0x81, 0x0b, 0x96, 0xe7, 0x05, 0xf7, 0x05, 0xb0, 0x11, 0x04, 0xc7, - 0x6d, 0x8b, 0x1e, 0x33, 0xd1, 0x4c, 0x17, 0x1b, 0xff, 0xa3, 0x58, 0x2e, 0x6c, 0x0c, 0x93, 0xe0, - 0x51, 0x7c, 0xa3, 0x8e, 0x6d, 0x7a, 0xc2, 0x63, 0x3b, 0x82, 0xe5, 0x01, 0x90, 0xb8, 0xe5, 0xaa, - 0xb3, 0x7d, 0x4e, 0xc9, 0x59, 0xc6, 0x23, 0x68, 0xce, 0xc6, 0xc0, 0xf1, 0x48, 0x89, 0xe8, 0x3a, - 0xcc, 0x73, 0x4f, 0x0e, 0xba, 0x61, 0x54, 0x77, 0x16, 0xc4, 0x71, 0xa3, 0xd3, 0x7e, 0x6d, 0xfe, - 0x76, 0x0a, 0x83, 0x07, 0x28, 0xf9, 0xe6, 0x7a, 0x6e, 0xdb, 0x0d, 0xab, 0xb3, 0x82, 0x25, 0xde, - 0xdc, 0x5b, 0x1c, 0x88, 0x25, 0x2e, 0xe5, 0x81, 0xc5, 0xf3, 0x3c, 0xd0, 0xfc, 0x63, 0x1e, 0x90, - 0xac, 0xb5, 0x1d, 0x59, 0x4f, 0xc9, 0x90, 0xc6, 0x3b, 0x02, 0x55, 0xab, 0x1b, 0x03, 0x1d, 0x81, - 0x2a, 0xd3, 0x23, 0x3c, 0xda, 0x81, 0x92, 0x0c, 0x2d, 0xc9, 0x75, 0x59, 0x57, 0xc4, 0xa5, 0xdd, - 0x08, 0x71, 0xd6, 0xaf, 0xad, 0xa4, 0xd4, 0xc4, 0x18, 0xd1, 0xad, 0x25, 0x12, 0xd0, 0x55, 0x00, - 0xab, 0xe3, 0xea, 0xf3, 0xba, 0x52, 0x32, 0xb5, 0x49, 0x3a, 0x6f, 0xac, 0x51, 0xa1, 0x57, 0x60, - 0x3a, 0xfc, 0x7c, 0x1d, 0x55, 0x51, 0x34, 0x8c, 0xbc, 0x7f, 0x12, 0x12, 0xb8, 0x76, 0xe1, 0xcf, - 0x8c, 0x9b, 0xa5, 0x9a, 0xa1, 0x58, 0xfb, 0x56, 0x8c, 0xc1, 0x1a, 0x15, 0xfa, 0x06, 0x14, 0x0f, - 0x55, 0x29, 0x2a, 0x0e, 0x26, 0x73, 0x88, 0x8c, 0x0a, 0x58, 0x39, 0x32, 0x88, 0xbe, 0x70, 0x2c, - 0x0d, 0x7d, 0x19, 0xca, 0xac, 0x7b, 0x10, 0x67, 0x6f, 0x79, 0x9a, 0x71, 0xaa, 0xdc, 0x4f, 0x50, - 0x58, 0xa7, 0x33, 0xdf, 0x82, 0xd2, 0x8e, 0x6b, 0xd3, 0x40, 0xf4, 0x80, 0x4f, 0xc1, 0x2c, 0x4b, - 0x35, 0x38, 0xf1, 0x49, 0x46, 0x5e, 0x16, 0xe1, 0xb9, 0x7b, 0xf9, 0x96, 0x1f, 0xc8, 0x36, 0xa6, - 0x90, 0xb8, 0xd7, 0xeb, 0x1c, 0x88, 0x25, 0xee, 0xfa, 0x32, 0x2f, 0x10, 0xde, 0xfd, 0xb0, 0x36, - 0xf5, 0xfe, 0x87, 0xb5, 0xa9, 0x0f, 0x3e, 0x54, 0xc5, 0xc2, 0xbb, 0x65, 0x80, 0xdd, 0x83, 0xef, - 0x12, 0x5b, 0x86, 0xdd, 0x4c, 0x63, 0xbd, 0x68, 0x9a, 0x2c, 0xc6, 0x7a, 0xb9, 0x81, 0xa2, 0x4f, - 0xc3, 0xe1, 0x14, 0x25, 0x5a, 0x87, 0x52, 0x3c, 0xb0, 0x53, 0xfe, 0xb1, 0x14, 0xf9, 0x5b, 0x3c, - 0xd5, 0xc3, 0x09, 0x4d, 0x2a, 0x07, 0x4c, 0x9f, 0x9b, 0x03, 0x1a, 0x90, 0xef, 0xba, 
0x8e, 0x6a, - 0x98, 0x9f, 0x8d, 0x72, 0xf0, 0x9d, 0xed, 0xe6, 0x59, 0xbf, 0xf6, 0xd8, 0xb8, 0x39, 0x79, 0xd8, - 0xeb, 0x10, 0x56, 0xbf, 0xb3, 0xdd, 0xc4, 0x9c, 0x79, 0x54, 0x40, 0x9a, 0x99, 0x30, 0x20, 0x5d, - 0x05, 0x68, 0x25, 0x63, 0x07, 0x79, 0xdf, 0x63, 0x47, 0xd4, 0xc6, 0x0d, 0x1a, 0x15, 0x62, 0xb0, - 0x64, 0xf3, 0xd6, 0x5c, 0xb5, 0xff, 0x2c, 0xb4, 0xda, 0x72, 0x90, 0x39, 0xd9, 0x9d, 0xb8, 0xa4, - 0xd4, 0x2c, 0x6d, 0x0e, 0x0a, 0xc3, 0xc3, 0xf2, 0x51, 0x00, 0x4b, 0x8e, 0xea, 0x10, 0x13, 0xa5, - 0xa5, 0x89, 0x95, 0x5e, 0xe4, 0x0a, 0x9b, 0x83, 0x82, 0xf0, 0xb0, 0x6c, 0xf4, 0x6d, 0x58, 0x89, - 0x80, 0xc3, 0x6d, 0xba, 0x08, 0xd8, 0xf9, 0xc6, 0xea, 0x69, 0xbf, 0xb6, 0xd2, 0x1c, 0x4b, 0x85, - 0x1f, 0x20, 0x01, 0x39, 0x30, 0xe3, 0xc9, 0x02, 0xb7, 0x2c, 0x8a, 0x92, 0xaf, 0x64, 0x5b, 0x45, - 0xe2, 0xfd, 0x75, 0xbd, 0xb0, 0x8d, 0x47, 0x2e, 0xaa, 0xa6, 0x55, 0xb2, 0xd1, 0xdb, 0x50, 0xb6, - 0x7c, 0x3f, 0x08, 0x2d, 0x39, 0x38, 0x98, 0x13, 0xaa, 0x36, 0x26, 0x56, 0xb5, 0x91, 0xc8, 0x18, - 0x28, 0xa4, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0xfb, 0xb0, 0x10, 0xdc, 0xf7, 0x09, 0xc5, 0xe4, 0x90, - 0x50, 0xe2, 0xdb, 0x84, 0x55, 0x2b, 0x42, 0xfb, 0x73, 0x19, 0xb5, 0xa7, 0x98, 0x13, 0x97, 0x4e, - 0xc3, 0x19, 0x1e, 0xd4, 0x82, 0xea, 0x3c, 0xb6, 0xfa, 0x96, 0xe7, 0x7e, 0x8f, 0x50, 0x56, 0x9d, - 0x4f, 0x66, 0xcd, 0x5b, 0x31, 0x14, 0x6b, 0x14, 0x68, 0x13, 0xca, 0xb6, 0xd7, 0x65, 0x21, 0x91, - 0x83, 0xff, 0x85, 0xd4, 0x04, 0xef, 0xd2, 0xbd, 0x7b, 0xf7, 0xbe, 0xd3, 0x24, 0x1d, 0x4a, 0x6c, - 0x2b, 0x24, 0xce, 0x66, 0x42, 0x88, 0x75, 0x2e, 0xd4, 0x85, 0x4a, 0x5b, 0xcf, 0x3b, 0xd5, 0x25, - 0xb1, 0xd6, 0x6b, 0xd9, 0xd6, 0x3a, 0x9c, 0x19, 0x93, 0x32, 0x28, 0x85, 0xc3, 0x69, 0x2d, 0x2b, - 0x2f, 0x40, 0xf9, 0x73, 0x76, 0x08, 0xbc, 0xc3, 0x18, 0x3c, 0xd5, 0x89, 0x3a, 0x8c, 0xdf, 0xe7, - 0x60, 0x3e, 0x7d, 0x16, 0x03, 0x39, 0xb5, 0x90, 0x29, 0xa7, 0x46, 0xbd, 0xac, 0x31, 0xf6, 0xe5, - 0x22, 0x0a, 0xf2, 0xf9, 0xb1, 0x41, 0x5e, 0xc5, 0xd2, 0xe9, 0x87, 0x89, 0xa5, 0x75, 0x00, 0x5e, - 0xac, 0xd0, 0xc0, 0xf3, 0x08, 0x15, 0x61, 0xb4, 0xa8, 0x5e, 0x28, 0x62, 0x28, 0xd6, 0x28, 0x78, - 0x49, 0x7d, 0xe0, 0x05, 0xf6, 0xb1, 0xd8, 0x82, 0x28, 0x04, 0x88, 0x00, 0x5a, 0x94, 0x25, 0x75, - 0x63, 0x08, 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xb8, 0x67, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0xba, - 0x89, 0x9e, 0xe5, 0xcd, 0xa1, 0x8e, 0xe8, 0xd9, 0x49, 0xaf, 0x6d, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, - 0x8a, 0xcc, 0x3f, 0x1b, 0x70, 0x69, 0xa4, 0xee, 0x2f, 0xa0, 0x23, 0x7b, 0x33, 0xdd, 0x91, 0xbd, - 0x98, 0x71, 0x94, 0x39, 0xca, 0xda, 0x31, 0xfd, 0xd9, 0x2c, 0x14, 0xf6, 0x78, 0x25, 0x6c, 0x7e, - 0x6c, 0xc0, 0x9c, 0xf8, 0x35, 0xc9, 0x24, 0xb9, 0x96, 0x7e, 0x60, 0x28, 0x3d, 0xba, 0xc7, 0x85, - 0x47, 0x31, 0x6a, 0x7e, 0xcf, 0x80, 0xf4, 0x0c, 0x17, 0xbd, 0x2c, 0xaf, 0x80, 0x11, 0x0f, 0x59, - 0x27, 0x74, 0xff, 0x97, 0xc6, 0xb5, 0xa4, 0x17, 0x32, 0x4d, 0x2b, 0x9f, 0x86, 0x12, 0x0e, 0x82, - 0x70, 0xcf, 0x0a, 0x8f, 0x18, 0xdf, 0xbb, 0x0e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, - 0x09, 0x37, 0x7f, 0x6a, 0xc0, 0xa5, 0xb1, 0xef, 0x46, 0x3c, 0x8a, 0xd8, 0xf1, 0x97, 0x5a, 0x51, - 0xec, 0xc8, 0x09, 0x1d, 0xd6, 0xa8, 0x78, 0x2f, 0x99, 0x7a, 0x6c, 0x1a, 0xec, 0x25, 0x53, 0xda, - 0x70, 0x9a, 0xd6, 0xfc, 0x7b, 0x0e, 0xd4, 0x43, 0xcd, 0xbf, 0xd9, 0xe9, 0x9f, 0x1c, 0x78, 0x26, - 0x9a, 0x4f, 0x3f, 0x13, 0xc5, 0x6f, 0x42, 0xda, 0x3b, 0x49, 0xfe, 0xc1, 0xef, 0x24, 0xe8, 0xf9, - 0xf8, 0xe9, 0x45, 0xfa, 0xd0, 0x6a, 0xfa, 0xe9, 0xe5, 0xac, 0x5f, 0x9b, 0x53, 0xc2, 0xd3, 0x4f, - 0x31, 0xf7, 0x60, 0xd6, 0x21, 0xa1, 0xe5, 0x7a, 0xb2, 0x2f, 0xcc, 0xfc, 0x98, 0x20, 0x85, 0x35, - 0x25, 0x6b, 
0xa3, 0xcc, 0x6d, 0x52, 0x1f, 0x38, 0x12, 0xc8, 0x03, 0xb6, 0x1d, 0x38, 0xb2, 0xad, - 0x29, 0x24, 0x01, 0x7b, 0x33, 0x70, 0x08, 0x16, 0x18, 0xf3, 0x7d, 0x03, 0xca, 0x52, 0xd2, 0xa6, - 0xd5, 0x65, 0x04, 0x5d, 0x89, 0x57, 0x21, 0x8f, 0xfb, 0x92, 0xfe, 0xc6, 0x76, 0xd6, 0xaf, 0x95, - 0x04, 0x99, 0xe8, 0x88, 0x46, 0xbc, 0x25, 0xe5, 0xce, 0xd9, 0xa3, 0xc7, 0xa1, 0x20, 0x2e, 0x90, - 0xda, 0xcc, 0xe4, 0xb1, 0x90, 0x03, 0xb1, 0xc4, 0x99, 0x9f, 0xe6, 0xa0, 0x92, 0x5a, 0x5c, 0x86, - 0xe6, 0x22, 0x1e, 0xa1, 0xe6, 0x32, 0x8c, 0xe5, 0xc7, 0x3f, 0xcd, 0xab, 0xf4, 0x35, 0xf3, 0x30, - 0xe9, 0xeb, 0x9b, 0x30, 0x63, 0xf3, 0x3d, 0x8a, 0xfe, 0xe9, 0x71, 0x65, 0x92, 0xe3, 0x14, 0xbb, - 0x9b, 0x78, 0xa3, 0xf8, 0x64, 0x58, 0x09, 0x44, 0x37, 0x61, 0x89, 0x92, 0x90, 0xf6, 0x36, 0x0e, - 0x43, 0x42, 0xf5, 0x61, 0x42, 0x21, 0x29, 0xe1, 0xf1, 0x20, 0x01, 0x1e, 0xe6, 0x31, 0x0f, 0x60, - 0xee, 0xb6, 0x75, 0xe0, 0xc5, 0xcf, 0x63, 0x18, 0x2a, 0xae, 0x6f, 0x7b, 0x5d, 0x87, 0xc8, 0x80, - 0x1e, 0x45, 0xaf, 0xe8, 0xd2, 0x6e, 0xeb, 0xc8, 0xb3, 0x7e, 0xed, 0x42, 0x0a, 0x20, 0xdf, 0x83, - 0x70, 0x5a, 0x84, 0xe9, 0xc1, 0xf4, 0x17, 0xd8, 0x8e, 0x7e, 0x0b, 0x4a, 0x49, 0xc3, 0xf0, 0x88, - 0x55, 0x9a, 0x6f, 0x42, 0x91, 0x7b, 0x7c, 0xd4, 0xe8, 0x9e, 0x53, 0x25, 0xa5, 0x6b, 0xaf, 0x5c, - 0x96, 0xda, 0x4b, 0x3c, 0xb2, 0xde, 0xe9, 0x38, 0x0f, 0xf9, 0xc8, 0x9a, 0x7b, 0x98, 0xcc, 0x97, - 0x9f, 0x30, 0xf3, 0x5d, 0x05, 0xf9, 0x47, 0x14, 0x9e, 0x64, 0x64, 0x01, 0xa1, 0x25, 0x19, 0x3d, - 0xff, 0x6b, 0x2f, 0x0c, 0x3f, 0x32, 0x00, 0xc4, 0x28, 0xef, 0xc6, 0x09, 0xf1, 0xc3, 0x0c, 0xcf, - 0xf9, 0x77, 0x60, 0x26, 0x90, 0x1e, 0x29, 0x1f, 0x5a, 0x27, 0x9c, 0x17, 0xc7, 0x17, 0x49, 0xfa, - 0x24, 0x56, 0xc2, 0x1a, 0xaf, 0x7e, 0xf4, 0xd9, 0xea, 0xd4, 0xc7, 0x9f, 0xad, 0x4e, 0x7d, 0xf2, - 0xd9, 0xea, 0xd4, 0x3b, 0xa7, 0xab, 0xc6, 0x47, 0xa7, 0xab, 0xc6, 0xc7, 0xa7, 0xab, 0xc6, 0x27, - 0xa7, 0xab, 0xc6, 0xa7, 0xa7, 0xab, 0xc6, 0xfb, 0x7f, 0x5d, 0x9d, 0xba, 0xf7, 0x44, 0x96, 0x3f, - 0xf8, 0xfd, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x60, 0xec, 0x20, 0x28, 0x00, 0x00, + // 2867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, + 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, + 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, + 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, + 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, + 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, + 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, + 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0xc7, 0x5f, 0x3f, 0xee, 0x1d, 0x10, + 0xea, 0x91, 0x80, 0xb0, 0xf5, 0x13, 0xe2, 0xd9, 0x3e, 0x5d, 0x57, 0x08, 0xab, 0xeb, 0x74, 0xac, + 0xd6, 0x91, 0xe3, 0x11, 0xda, 0x5f, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xad, 0x77, 0x48, 0x60, 0xad, + 0x9f, 0x5c, 0x59, 0x6f, 0x13, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xbd, 0x4b, 0xfd, 0xc0, 0x47, 0x8f, + 0x49, 0xae, 0xba, 0xce, 0x55, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0xe4, 0xca, + 0xca, 0x53, 0x6d, 0x27, 0x38, 0xea, 0x1d, 0xd4, 0x5b, 0x7e, 0x67, 0xbd, 0xed, 0xb7, 0xfd, 0x75, + 0xc1, 0x7c, 0xd0, 0x3b, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0x26, 0x9a, 0x42, 0x7b, + 0x5e, 0xe0, 0x74, 0xc8, 0xb0, 0x15, 0x2b, 0xcf, 0x9e, 0xc7, 0xc0, 0x5a, 0x47, 0xa4, 0x63, 
0x0d, + 0xf3, 0x99, 0x7f, 0xca, 0x42, 0x61, 0x63, 0x6f, 0xfb, 0x26, 0xf5, 0x7b, 0x5d, 0xb4, 0x06, 0x39, + 0xcf, 0xea, 0x90, 0xaa, 0xb1, 0x66, 0x5c, 0x2e, 0x36, 0xca, 0x1f, 0x0c, 0x6a, 0x33, 0xa7, 0x83, + 0x5a, 0xee, 0x55, 0xab, 0x43, 0xb0, 0xc0, 0x20, 0x17, 0x0a, 0x27, 0x84, 0x32, 0xc7, 0xf7, 0x58, + 0x35, 0xb3, 0x96, 0xbd, 0x5c, 0xba, 0xfa, 0x62, 0x3d, 0xcd, 0xfa, 0xeb, 0x42, 0xc1, 0x5d, 0xc9, + 0xba, 0xe5, 0xd3, 0xa6, 0xc3, 0x5a, 0xfe, 0x09, 0xa1, 0xfd, 0xc6, 0xa2, 0xd2, 0x52, 0x50, 0x48, + 0x86, 0x23, 0x0d, 0xe8, 0x47, 0x06, 0x2c, 0x76, 0x29, 0x39, 0x24, 0x94, 0x12, 0x5b, 0xe1, 0xab, + 0xd9, 0x35, 0xe3, 0x21, 0xa8, 0xad, 0x2a, 0xb5, 0x8b, 0x7b, 0x43, 0xf2, 0xf1, 0x88, 0x46, 0xf4, + 0x6b, 0x03, 0x56, 0x18, 0xa1, 0x27, 0x84, 0x6e, 0xd8, 0x36, 0x25, 0x8c, 0x35, 0xfa, 0x9b, 0xae, + 0x43, 0xbc, 0x60, 0x73, 0xbb, 0x89, 0x59, 0x35, 0x27, 0xf6, 0xe1, 0xeb, 0xe9, 0x0c, 0xda, 0x9f, + 0x24, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0x26, 0x92, 0x30, 0x7c, 0x1f, 0x33, 0xcc, 0x43, 0x28, 0x87, + 0x07, 0x79, 0xcb, 0x61, 0x01, 0xba, 0x0b, 0xb3, 0x6d, 0xfe, 0xc1, 0xaa, 0x86, 0x30, 0xb0, 0x9e, + 0xce, 0xc0, 0x50, 0x46, 0x63, 0x5e, 0xd9, 0x33, 0x2b, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0xe5, + 0xa0, 0xb4, 0xb1, 0xb7, 0x8d, 0x09, 0xf3, 0x7b, 0xb4, 0x45, 0x52, 0x38, 0xcd, 0x35, 0x28, 0x33, + 0xc7, 0x6b, 0xf7, 0x5c, 0x8b, 0x72, 0x68, 0x75, 0x56, 0x50, 0x2e, 0x2b, 0xca, 0xf2, 0xbe, 0x86, + 0xc3, 0x09, 0x4a, 0x74, 0x15, 0x80, 0x4b, 0x60, 0x5d, 0xab, 0x45, 0xec, 0x6a, 0x66, 0xcd, 0xb8, + 0x5c, 0x68, 0x20, 0xc5, 0x07, 0xaf, 0x46, 0x18, 0xac, 0x51, 0xa1, 0x47, 0x21, 0x2f, 0x2c, 0xad, + 0x16, 0x84, 0x9a, 0x8a, 0x22, 0xcf, 0x8b, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc0, 0x9c, 0xf2, 0xb2, + 0x6a, 0x51, 0x90, 0x2d, 0x28, 0xb2, 0xb9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0xb1, 0xe3, 0xd9, + 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xe2, 0x78, 0x36, 0x16, 0x18, 0x74, 0x0b, 0xf2, 0x27, 0x84, 0x1e, + 0x70, 0x4f, 0xe0, 0xae, 0xf9, 0xe5, 0x74, 0x1b, 0x7d, 0x97, 0xb3, 0x34, 0x8a, 0xdc, 0x34, 0xf1, + 0x13, 0x4b, 0x21, 0xa8, 0x0e, 0xc0, 0x8e, 0x7c, 0x1a, 0x88, 0xe5, 0x55, 0xf3, 0x6b, 0xd9, 0xcb, + 0xc5, 0xc6, 0x3c, 0x5f, 0xef, 0x7e, 0x04, 0xc5, 0x1a, 0x05, 0xa7, 0x6f, 0x59, 0x01, 0x69, 0xfb, + 0xd4, 0x21, 0xac, 0x3a, 0x17, 0xd3, 0x6f, 0x46, 0x50, 0xac, 0x51, 0xa0, 0x97, 0x01, 0xb1, 0xc0, + 0xa7, 0x56, 0x9b, 0xa8, 0xa5, 0xbe, 0x64, 0xb1, 0xa3, 0x2a, 0x88, 0xd5, 0xad, 0xa8, 0xd5, 0xa1, + 0xfd, 0x11, 0x0a, 0x3c, 0x86, 0xcb, 0xfc, 0x9d, 0x01, 0x0b, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x06, + 0xe5, 0xb6, 0x76, 0xeb, 0x94, 0x5f, 0x44, 0xa7, 0xad, 0xdf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x49, 0x0a, 0xa3, 0xcb, 0x95, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7, + 0x92, 0xcd, 0x7f, 0x18, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x2e, 0x6b, 0x31, 0xcd, 0x10, 0xdb, 0x57, + 0x9e, 0x10, 0x8f, 0xce, 0x09, 0x04, 0x99, 0xff, 0x8b, 0x40, 0x70, 0xbd, 0xf0, 0xcb, 0xf7, 0x6a, + 0x33, 0x6f, 0xff, 0x6d, 0x6d, 0xc6, 0xfc, 0x85, 0x01, 0xe5, 0x8d, 0x6e, 0xd7, 0xed, 0xef, 0x76, + 0x03, 0xb1, 0x00, 0x13, 0x66, 0x6d, 0xda, 0xc7, 0x3d, 0x4f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, 0x0a, + 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x0e, 0x7d, 0xda, 0x22, 0xea, 0xba, 0x45, 0xf7, 0x67, 0x8b, 0x03, + 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x74, 0x88, 0x6b, 0xef, 0x58, 0x9e, 0xd5, 0x26, 0x54, 0x5d, 0x8e, + 0x68, 0xeb, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0xff, 0xc9, 0x40, 0x71, 0xd3, 0xf7, 0x6c, 0x27, + 0x50, 0x97, 0x2b, 0xe8, 0x77, 0x47, 0x82, 0xc7, 0xed, 0x7e, 0x97, 0x60, 0x81, 0x41, 0xcf, 0xc1, + 0x2c, 0x0b, 0xac, 0xa0, 0xc7, 0x84, 0x3d, 0xc5, 0xc6, 0x23, 0x61, 0x58, 0xda, 0x17, 0xd0, 0xb3, + 0x41, 0x6d, 0x21, 
0x12, 0x27, 0x41, 0x58, 0x31, 0x70, 0x4f, 0xf7, 0x0f, 0xc4, 0x46, 0xd9, 0x37, + 0xe5, 0xb3, 0x17, 0xbe, 0x1f, 0xd9, 0xd8, 0xd3, 0x77, 0x47, 0x28, 0xf0, 0x18, 0x2e, 0x74, 0x02, + 0xc8, 0xb5, 0x58, 0x70, 0x9b, 0x5a, 0x1e, 0x13, 0xba, 0x6e, 0x3b, 0x1d, 0xa2, 0x2e, 0xfc, 0x97, + 0xd2, 0x9d, 0x38, 0xe7, 0x88, 0xf5, 0xde, 0x1a, 0x91, 0x86, 0xc7, 0x68, 0x40, 0x8f, 0xc3, 0x2c, + 0x25, 0x16, 0xf3, 0xbd, 0x6a, 0x5e, 0x2c, 0x3f, 0x8a, 0xca, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0x80, + 0xd6, 0x21, 0x8c, 0x59, 0xed, 0x30, 0xbc, 0x46, 0x01, 0x6d, 0x47, 0x82, 0x71, 0x88, 0x37, 0x7f, + 0x6b, 0x40, 0x65, 0x93, 0x12, 0x2b, 0x20, 0xd3, 0xb8, 0xc5, 0xa7, 0x3e, 0x71, 0xb4, 0x01, 0x0b, + 0xe2, 0xfb, 0xae, 0xe5, 0x3a, 0xb6, 0x3c, 0x83, 0x9c, 0x60, 0xfe, 0xbc, 0x62, 0x5e, 0xd8, 0x4a, + 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x93, 0x2c, 0x54, 0x9a, 0xc4, 0x25, 0xb1, 0xc9, 0x5b, 0x80, 0xda, + 0xd4, 0x6a, 0x91, 0x3d, 0x42, 0x1d, 0xdf, 0xde, 0x27, 0x2d, 0xdf, 0xb3, 0x99, 0x70, 0xa3, 0x6c, + 0xe3, 0x73, 0x7c, 0x7f, 0x6f, 0x8e, 0x60, 0xf1, 0x18, 0x0e, 0xe4, 0x42, 0xa5, 0x4b, 0xc5, 0x6f, + 0xb1, 0xe7, 0xd2, 0xcb, 0x4a, 0x57, 0xbf, 0x92, 0xee, 0x48, 0xf7, 0x74, 0xd6, 0xc6, 0xd2, 0xe9, + 0xa0, 0x56, 0x49, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x06, 0x2c, 0xfa, 0xb4, 0x7b, 0x64, 0x79, 0x4d, + 0xd2, 0x25, 0x9e, 0x4d, 0xbc, 0x80, 0x89, 0x8d, 0x2c, 0x34, 0x96, 0x79, 0x2e, 0xb2, 0x3b, 0x84, + 0xc3, 0x23, 0xd4, 0xe8, 0x35, 0x58, 0xea, 0x52, 0xbf, 0x6b, 0xb5, 0xc5, 0xc6, 0xec, 0xf9, 0xae, + 0xd3, 0xea, 0xab, 0xed, 0x7c, 0xf2, 0x74, 0x50, 0x5b, 0xda, 0x1b, 0x46, 0x9e, 0x0d, 0x6a, 0x17, + 0xc4, 0xd6, 0x71, 0x48, 0x8c, 0xc4, 0xa3, 0x62, 0x34, 0x37, 0xc8, 0x4f, 0x72, 0x03, 0x73, 0x1b, + 0x0a, 0xcd, 0x9e, 0xba, 0x13, 0x2f, 0x40, 0xc1, 0x56, 0xbf, 0xd5, 0xce, 0x87, 0x97, 0x33, 0xa2, + 0x39, 0x1b, 0xd4, 0x2a, 0x3c, 0xfd, 0xac, 0x87, 0x00, 0x1c, 0xb1, 0x98, 0x8f, 0x43, 0x41, 0x1c, + 0x3c, 0xbb, 0x7b, 0x05, 0x2d, 0x42, 0x16, 0x5b, 0xf7, 0x84, 0x94, 0x32, 0xe6, 0x3f, 0xb5, 0x28, + 0xb6, 0x0b, 0x70, 0x93, 0x04, 0xe1, 0xc1, 0x6f, 0xc0, 0x42, 0x18, 0xca, 0x93, 0x2f, 0x4c, 0xe4, + 0x4d, 0x38, 0x89, 0xc6, 0xc3, 0xf4, 0xe6, 0xeb, 0x50, 0x14, 0xaf, 0x10, 0x7f, 0xc2, 0xe3, 0x74, + 0xc1, 0xb8, 0x4f, 0xba, 0x10, 0xe6, 0x00, 0x99, 0x49, 0x39, 0x80, 0x66, 0xae, 0x0b, 0x15, 0xc9, + 0x1b, 0x26, 0x48, 0xa9, 0x34, 0x3c, 0x09, 0x85, 0xd0, 0x4c, 0xa5, 0x25, 0x4a, 0x8c, 0x43, 0x41, + 0x38, 0xa2, 0xd0, 0xb4, 0x1d, 0x41, 0xe2, 0x45, 0x4d, 0xa7, 0x4c, 0xcb, 0x7e, 0x32, 0xf7, 0xcf, + 0x7e, 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6c, 0xfa, 0x01, 0xde, 0xfc, 0xf4, 0xa6, 0x98, 0xef, + 0x18, 0xb0, 0xa8, 0x4b, 0x4a, 0x7f, 0x7c, 0xe9, 0x95, 0x9c, 0x9f, 0xed, 0x69, 0x3b, 0xf2, 0x2b, + 0x03, 0x96, 0x13, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, 0x30, 0x4a, 0x77, 0x8e, 0xec, 0x14, 0xce, 0xf1, + 0x97, 0x0c, 0x54, 0x6e, 0x59, 0x07, 0xc4, 0xdd, 0x27, 0x2e, 0x69, 0x05, 0x3e, 0x45, 0x3f, 0x80, + 0x52, 0xc7, 0x0a, 0x5a, 0x47, 0x02, 0x1a, 0x56, 0x06, 0xcd, 0x74, 0xc1, 0x2e, 0x21, 0xa9, 0xbe, + 0x13, 0x8b, 0xb9, 0xe1, 0x05, 0xb4, 0xdf, 0xb8, 0xa0, 0x4c, 0x2a, 0x69, 0x18, 0xac, 0x6b, 0x13, + 0xe5, 0x9c, 0xf8, 0xbe, 0xf1, 0x56, 0x97, 0xa7, 0x2d, 0xd3, 0x57, 0x91, 0x09, 0x13, 0x30, 0x79, + 0xb3, 0xe7, 0x50, 0xd2, 0x21, 0x5e, 0x10, 0x97, 0x73, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0x95, + 0x17, 0x61, 0x71, 0xd8, 0x78, 0x1e, 0x7f, 0x8e, 0x49, 0x5f, 0x9e, 0x17, 0xe6, 0x3f, 0xd1, 0x32, + 0xe4, 0x4f, 0x2c, 0xb7, 0xa7, 0x6e, 0x23, 0x96, 0x1f, 0xd7, 0x33, 0xd7, 0x0c, 0xf3, 0x37, 0x06, + 0x54, 0x27, 0x19, 0x82, 0xbe, 0xa8, 0x09, 0x6a, 0x94, 0x94, 0x55, 0xd9, 0x57, 0x48, 0x5f, 0x4a, + 0xbd, 0x01, 0x05, 0xbf, 0xcb, 0x73, 0x0a, 
0x9f, 0xaa, 0x53, 0x7f, 0x22, 0x3c, 0xc9, 0x5d, 0x05, + 0x3f, 0x1b, 0xd4, 0x2e, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x48, 0x2d, 0xec, 0xe1, 0xaf, + 0x47, 0x14, 0xa9, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0xf7, 0x06, 0xe4, 0x44, 0x42, 0xfe, 0x3a, + 0x14, 0xf8, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x97, 0x82, 0x9c, 0x7b, 0x87, 0x04, 0x56, + 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0xbc, 0x13, 0x90, 0x4e, 0x78, 0x90, 0x4f, 0x4d, + 0x14, 0xad, 0x1a, 0x11, 0x75, 0x6c, 0xdd, 0xbb, 0xf1, 0x56, 0x40, 0x3c, 0x7e, 0x18, 0xf1, 0xd5, + 0xd8, 0xe6, 0x32, 0xb0, 0x14, 0x65, 0xfe, 0xcb, 0x80, 0x48, 0x15, 0x77, 0x7e, 0x46, 0xdc, 0xc3, + 0x5b, 0x8e, 0x77, 0xac, 0xb6, 0x35, 0x32, 0x67, 0x5f, 0xc1, 0x71, 0x44, 0x31, 0xee, 0x79, 0xc8, + 0x4c, 0xf7, 0x3c, 0x70, 0x85, 0x2d, 0xdf, 0x0b, 0x1c, 0xaf, 0x37, 0x72, 0xdb, 0x36, 0x15, 0x1c, + 0x47, 0x14, 0x3c, 0x11, 0xa1, 0xa4, 0x63, 0x39, 0x9e, 0xe3, 0xb5, 0xf9, 0x22, 0x36, 0xfd, 0x9e, + 0x17, 0x88, 0x17, 0x59, 0x25, 0x22, 0x78, 0x04, 0x8b, 0xc7, 0x70, 0x98, 0xff, 0xce, 0x41, 0x89, + 0xaf, 0x39, 0x7c, 0xe7, 0x9e, 0x87, 0x8a, 0xab, 0x7b, 0x81, 0x5a, 0xfb, 0x45, 0x65, 0x4a, 0xf2, + 0x5e, 0xe3, 0x24, 0x2d, 0x67, 0x16, 0x29, 0x54, 0xc4, 0x9c, 0x49, 0x32, 0x6f, 0xe9, 0x48, 0x9c, + 0xa4, 0xe5, 0xd1, 0xeb, 0x1e, 0xbf, 0x1f, 0x2a, 0x33, 0x89, 0x8e, 0xe8, 0x9b, 0x1c, 0x88, 0x25, + 0x0e, 0xed, 0xc0, 0x05, 0xcb, 0x75, 0xfd, 0x7b, 0x02, 0xd8, 0xf0, 0xfd, 0xe3, 0x8e, 0x45, 0x8f, + 0x99, 0x28, 0xa6, 0x0b, 0x8d, 0x2f, 0x28, 0x96, 0x0b, 0x1b, 0xa3, 0x24, 0x78, 0x1c, 0xdf, 0xb8, + 0x63, 0xcb, 0x4d, 0x79, 0x6c, 0x47, 0xb0, 0x3c, 0x04, 0x12, 0xb7, 0x5c, 0x55, 0xb6, 0xcf, 0x28, + 0x39, 0xcb, 0x78, 0x0c, 0xcd, 0xd9, 0x04, 0x38, 0x1e, 0x2b, 0x11, 0x5d, 0x87, 0x79, 0xee, 0xc9, + 0x7e, 0x2f, 0x08, 0xf3, 0xce, 0xbc, 0x38, 0x6e, 0x74, 0x3a, 0xa8, 0xcd, 0xdf, 0x4e, 0x60, 0xf0, + 0x10, 0x25, 0xdf, 0x5c, 0xd7, 0xe9, 0x38, 0x41, 0x75, 0x4e, 0xb0, 0x44, 0x9b, 0x7b, 0x8b, 0x03, + 0xb1, 0xc4, 0x25, 0x3c, 0xb0, 0x70, 0xae, 0x07, 0x6e, 0xc2, 0x12, 0x23, 0x9e, 0xbd, 0xed, 0x39, + 0x81, 0x63, 0xb9, 0x37, 0x4e, 0x44, 0x56, 0x59, 0x12, 0x07, 0x71, 0x91, 0xa7, 0x84, 0xfb, 0xc3, + 0x48, 0x3c, 0x4a, 0x6f, 0xfe, 0x39, 0x0b, 0x48, 0x26, 0xec, 0xb6, 0x4c, 0xca, 0x64, 0x5c, 0xe4, + 0x65, 0x85, 0x4a, 0xf8, 0x8d, 0xa1, 0xb2, 0x42, 0xe5, 0xfa, 0x21, 0x1e, 0xed, 0x40, 0x51, 0xc6, + 0xa7, 0xf8, 0xce, 0xad, 0x2b, 0xe2, 0xe2, 0x6e, 0x88, 0x38, 0x1b, 0xd4, 0x56, 0x12, 0x6a, 0x22, + 0x8c, 0x28, 0xf9, 0x62, 0x09, 0xe8, 0x2a, 0x80, 0xd5, 0x75, 0xf4, 0xa6, 0x5f, 0x31, 0x6e, 0xfd, + 0xc4, 0xe5, 0x3b, 0xd6, 0xa8, 0xd0, 0x4b, 0x90, 0x0b, 0x3e, 0x5d, 0x59, 0x56, 0x10, 0x55, 0x27, + 0x2f, 0xc2, 0x84, 0x04, 0xae, 0x5d, 0x5c, 0x0a, 0xc6, 0xcd, 0x52, 0x15, 0x55, 0xa4, 0x7d, 0x2b, + 0xc2, 0x60, 0x8d, 0x0a, 0x7d, 0x0b, 0x0a, 0x87, 0x2a, 0x9f, 0x15, 0xa7, 0x9b, 0x3a, 0xce, 0x86, + 0x59, 0xb0, 0xec, 0x3b, 0x84, 0x5f, 0x38, 0x92, 0x86, 0xbe, 0x0a, 0x25, 0xd6, 0x3b, 0x88, 0x52, + 0x00, 0xe9, 0x12, 0xd1, 0x7b, 0xbb, 0x1f, 0xa3, 0xb0, 0x4e, 0x67, 0xbe, 0x09, 0xc5, 0x1d, 0xa7, + 0x45, 0x7d, 0x51, 0x48, 0x3e, 0x01, 0x73, 0x2c, 0x51, 0x25, 0x45, 0x27, 0x19, 0xba, 0x6a, 0x88, + 0xe7, 0x3e, 0xea, 0x59, 0x9e, 0x2f, 0x6b, 0xa1, 0x7c, 0xec, 0xa3, 0xaf, 0x72, 0x20, 0x96, 0xb8, + 0xeb, 0xcb, 0x3c, 0xcb, 0xf8, 0xe9, 0xfb, 0xb5, 0x99, 0x77, 0xdf, 0xaf, 0xcd, 0xbc, 0xf7, 0xbe, + 0xca, 0x38, 0xfe, 0x00, 0x00, 0xbb, 0x07, 0xdf, 0x23, 0x2d, 0x19, 0xbb, 0x53, 0xf5, 0x06, 0xc3, + 0x96, 0xb4, 0xe8, 0x0d, 0x66, 0x86, 0x32, 0x47, 0x0d, 0x87, 0x13, 0x94, 0x68, 0x1d, 0x8a, 0x51, + 0xd7, 0x4f, 0xf9, 0xc7, 0x52, 0xe8, 0x6f, 0x51, 0x6b, 0x10, 0xc7, 
0x34, 0x89, 0x87, 0x24, 0x77, + 0xee, 0x43, 0xd2, 0x80, 0x6c, 0xcf, 0xb1, 0x55, 0xd5, 0xfd, 0x74, 0xf8, 0x90, 0xdf, 0xd9, 0x6e, + 0x9e, 0x0d, 0x6a, 0x8f, 0x4c, 0x6a, 0xb6, 0x07, 0xfd, 0x2e, 0x61, 0xf5, 0x3b, 0xdb, 0x4d, 0xcc, + 0x99, 0xc7, 0x45, 0xb5, 0xd9, 0x29, 0xa3, 0xda, 0x55, 0x80, 0x76, 0xdc, 0xbb, 0x90, 0x41, 0x23, + 0x72, 0x44, 0xad, 0x67, 0xa1, 0x51, 0x21, 0x06, 0x4b, 0x2d, 0x5e, 0xdf, 0xab, 0x1e, 0x02, 0x0b, + 0xac, 0x8e, 0xec, 0x86, 0x4e, 0x77, 0x27, 0x2e, 0x29, 0x35, 0x4b, 0x9b, 0xc3, 0xc2, 0xf0, 0xa8, + 0x7c, 0xe4, 0xc3, 0x92, 0xad, 0xca, 0xcc, 0x58, 0x69, 0x71, 0x6a, 0xa5, 0x22, 0x62, 0x35, 0x87, + 0x05, 0xe1, 0x51, 0xd9, 0xe8, 0xbb, 0xb0, 0x12, 0x02, 0x47, 0x6b, 0x7d, 0x11, 0xf5, 0xb3, 0x8d, + 0xd5, 0xd3, 0x41, 0x6d, 0xa5, 0x39, 0x91, 0x0a, 0xdf, 0x47, 0x02, 0xb2, 0x61, 0xd6, 0x95, 0x59, + 0x72, 0x49, 0x64, 0x36, 0x5f, 0x4b, 0xb7, 0x8a, 0xd8, 0xfb, 0xeb, 0x7a, 0x76, 0x1c, 0xf5, 0x6d, + 0x54, 0x62, 0xac, 0x64, 0xa3, 0xb7, 0xa0, 0x64, 0x79, 0x9e, 0x1f, 0x58, 0xb2, 0xfb, 0x50, 0x16, + 0xaa, 0x36, 0xa6, 0x56, 0xb5, 0x11, 0xcb, 0x18, 0xca, 0xc6, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, + 0xb0, 0xe0, 0xdf, 0xf3, 0x08, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb5, 0x08, 0xab, 0x56, 0x84, 0xf6, + 0x67, 0x52, 0x6a, 0x4f, 0x30, 0xc7, 0x2e, 0x9d, 0x84, 0x33, 0x3c, 0xac, 0x05, 0xd5, 0x79, 0x6c, + 0xf5, 0x2c, 0xd7, 0xf9, 0x3e, 0xa1, 0xac, 0x3a, 0x1f, 0x37, 0xac, 0xb7, 0x22, 0x28, 0xd6, 0x28, + 0x50, 0x0f, 0x2a, 0x1d, 0xfd, 0xc9, 0xa8, 0x2e, 0x09, 0x33, 0xaf, 0xa5, 0x33, 0x73, 0xf4, 0x51, + 0x8b, 0xd3, 0xa0, 0x04, 0x0e, 0x27, 0xb5, 0xac, 0x3c, 0x07, 0xa5, 0x4f, 0x59, 0x21, 0xf0, 0x0a, + 0x63, 0xf8, 0x40, 0xa6, 0xaa, 0x30, 0xfe, 0x98, 0x81, 0xf9, 0xe4, 0x36, 0x0e, 0x3d, 0x87, 0xf9, + 0x54, 0xcf, 0x61, 0x58, 0xcb, 0x1a, 0x13, 0x27, 0x17, 0x61, 0x7c, 0xce, 0x4e, 0x8c, 0xcf, 0x2a, + 0x0c, 0xe6, 0x1e, 0x24, 0x0c, 0xd6, 0x01, 0x78, 0xb2, 0x42, 0x7d, 0xd7, 0x25, 0x54, 0x44, 0xc0, + 0x82, 0x9a, 0x50, 0x44, 0x50, 0xac, 0x51, 0xf0, 0x94, 0xfa, 0xc0, 0xf5, 0x5b, 0xc7, 0x62, 0x0b, + 0xc2, 0xdb, 0x2b, 0x62, 0x5f, 0x41, 0xa6, 0xd4, 0x8d, 0x11, 0x2c, 0x1e, 0xc3, 0x61, 0xf6, 0xe1, + 0xe2, 0x9e, 0x45, 0x79, 0x92, 0x13, 0xdf, 0x14, 0x51, 0xb3, 0xbc, 0x31, 0x52, 0x11, 0x3d, 0x3d, + 0xed, 0x8d, 0x8b, 0x37, 0x3f, 0x86, 0xc5, 0x55, 0x91, 0xf9, 0x57, 0x03, 0x2e, 0x8d, 0xd5, 0xfd, + 0x19, 0x54, 0x64, 0x6f, 0x24, 0x2b, 0xb2, 0xe7, 0x53, 0xb6, 0x32, 0xc7, 0x59, 0x3b, 0xa1, 0x3e, + 0x9b, 0x83, 0xfc, 0x1e, 0xcf, 0x84, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, 0x5f, 0xd3, 0x74, 0x92, 0x6b, + 0xc9, 0x01, 0x43, 0xf1, 0xe1, 0x0d, 0x17, 0x1e, 0x46, 0xab, 0xf9, 0x1d, 0x03, 0x92, 0x3d, 0x5c, + 0xf4, 0xa2, 0xbc, 0x02, 0x46, 0xd4, 0x64, 0x9d, 0xd2, 0xfd, 0x5f, 0x98, 0x54, 0x92, 0x5e, 0x48, + 0xd5, 0xad, 0x7c, 0x12, 0x8a, 0xd8, 0xf7, 0x83, 0x3d, 0x2b, 0x38, 0x62, 0x7c, 0xef, 0xba, 0xfc, + 0x87, 0xda, 0x5e, 0xb1, 0x77, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x97, 0x26, 0xce, 0x8d, + 0x78, 0x14, 0x69, 0x45, 0x5f, 0x6a, 0x45, 0x91, 0x23, 0xc7, 0x74, 0x58, 0xa3, 0xe2, 0xb5, 0x64, + 0x62, 0xd8, 0x34, 0x5c, 0x4b, 0x26, 0xb4, 0xe1, 0x24, 0xad, 0xf9, 0xcf, 0x0c, 0xa8, 0x41, 0xcd, + 0xff, 0xd8, 0xe9, 0x1f, 0x1f, 0x1a, 0x13, 0xcd, 0x27, 0xc7, 0x44, 0xd1, 0x4c, 0x48, 0x9b, 0x93, + 0x64, 0xef, 0x3f, 0x27, 0x41, 0xcf, 0x46, 0xa3, 0x17, 0xe9, 0x43, 0xab, 0xc9, 0xd1, 0xcb, 0xd9, + 0xa0, 0x56, 0x56, 0xc2, 0x93, 0xa3, 0x98, 0xd7, 0x60, 0xce, 0x26, 0x81, 0xe5, 0xb8, 0xb2, 0x2e, + 0x4c, 0x3d, 0x4c, 0x90, 0xc2, 0x9a, 0x92, 0xb5, 0x51, 0xe2, 0x36, 0xa9, 0x0f, 0x1c, 0x0a, 0xe4, + 0x01, 0xbb, 0xe5, 0xdb, 0xb2, 0x22, 0xc9, 0xc7, 0x01, 0x7b, 0xd3, 0xb7, 0x09, 0x16, 0x18, 
0xf3, + 0x5d, 0x03, 0x4a, 0x52, 0xd2, 0xa6, 0xd5, 0x63, 0x04, 0x5d, 0x89, 0x56, 0x21, 0x8f, 0xfb, 0x92, + 0x3e, 0x63, 0x3b, 0x1b, 0xd4, 0x8a, 0x82, 0x4c, 0x14, 0x33, 0x63, 0x66, 0x49, 0x99, 0x73, 0xf6, + 0xe8, 0x51, 0xc8, 0x8b, 0x0b, 0xa4, 0x36, 0x33, 0x1e, 0x16, 0x72, 0x20, 0x96, 0x38, 0xf3, 0xe3, + 0x0c, 0x54, 0x12, 0x8b, 0x4b, 0x51, 0x17, 0x44, 0x2d, 0xd4, 0x4c, 0x8a, 0xb6, 0xfc, 0xe4, 0xd1, + 0xbc, 0x7a, 0xbe, 0x66, 0x1f, 0xe4, 0xf9, 0xfa, 0x36, 0xcc, 0xb6, 0xf8, 0x1e, 0x85, 0xff, 0xf4, + 0xb8, 0x32, 0xcd, 0x71, 0x8a, 0xdd, 0x8d, 0xbd, 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0xb0, + 0x44, 0x49, 0x40, 0xfb, 0x1b, 0x87, 0x01, 0xa1, 0x7a, 0x33, 0x21, 0x1f, 0x67, 0xdf, 0x78, 0x98, + 0x00, 0x8f, 0xf2, 0x98, 0x07, 0x50, 0xbe, 0x6d, 0x1d, 0xb8, 0xd1, 0x78, 0x0c, 0x43, 0xc5, 0xf1, + 0x5a, 0x6e, 0xcf, 0x26, 0x32, 0xa0, 0x87, 0xd1, 0x2b, 0xbc, 0xb4, 0xdb, 0x3a, 0xf2, 0x6c, 0x50, + 0xbb, 0x90, 0x00, 0xc8, 0x79, 0x10, 0x4e, 0x8a, 0x30, 0x5d, 0xc8, 0x7d, 0x86, 0x95, 0xe4, 0x77, + 0xa0, 0x18, 0xe7, 0xfa, 0x0f, 0x59, 0xa5, 0xf9, 0x06, 0x14, 0xb8, 0xc7, 0x87, 0x35, 0xea, 0x39, + 0x59, 0x52, 0x32, 0xf7, 0xca, 0xa4, 0xc9, 0xbd, 0xc4, 0x90, 0xf5, 0x4e, 0xd7, 0x7e, 0xc0, 0x21, + 0x6b, 0xe6, 0x41, 0x5e, 0xbe, 0xec, 0x94, 0x2f, 0xdf, 0x55, 0x90, 0x7f, 0x44, 0xe1, 0x8f, 0x8c, + 0x4c, 0x20, 0xb4, 0x47, 0x46, 0x7f, 0xff, 0xb5, 0x09, 0xc3, 0x8f, 0x0d, 0x00, 0xd1, 0xca, 0x13, + 0x6d, 0xa4, 0x14, 0xe3, 0xfc, 0x3b, 0x30, 0xeb, 0x4b, 0x8f, 0x94, 0x83, 0xd6, 0x29, 0xfb, 0xc5, + 0xd1, 0x45, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xc6, 0xcb, 0x1f, 0x7c, 0xb2, 0x3a, 0xf3, 0xe1, 0x27, + 0xab, 0x33, 0x1f, 0x7d, 0xb2, 0x3a, 0xf3, 0xf6, 0xe9, 0xaa, 0xf1, 0xc1, 0xe9, 0xaa, 0xf1, 0xe1, + 0xe9, 0xaa, 0xf1, 0xd1, 0xe9, 0xaa, 0xf1, 0xf1, 0xe9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0x67, 0x5e, + 0x7b, 0x2c, 0xcd, 0x1f, 0xfc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x28, 0x27, 0x65, 0xab, 0x20, + 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2505,6 +2505,16 @@ func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SendInitialEvents != nil { + i-- + if *m.SendInitialEvents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } i -= len(m.ResourceVersionMatch) copy(dAtA[i:], m.ResourceVersionMatch) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch))) @@ -2665,11 +2675,6 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x8a } } - i -= len(m.ZZZ_DeprecatedClusterName) - copy(dAtA[i:], m.ZZZ_DeprecatedClusterName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ZZZ_DeprecatedClusterName))) - i-- - dAtA[i] = 0x7a if len(m.Finalizers) > 0 { for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Finalizers[iNdEx]) @@ -3915,6 +3920,9 @@ func (m *ListOptions) Size() (n int) { n += 2 l = len(m.ResourceVersionMatch) n += 1 + l + sovGenerated(uint64(l)) + if m.SendInitialEvents != nil { + n += 2 + } return n } @@ -4001,8 +4009,6 @@ func (m *ObjectMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.ZZZ_DeprecatedClusterName) - n += 1 + l + sovGenerated(uint64(l)) if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -4526,6 +4532,7 @@ func (this *ListOptions) String() string { `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`, + `SendInitialEvents:` + 
valueToStringGenerated(this.SendInitialEvents) + `,`, `}`, }, "") return s @@ -4595,7 +4602,6 @@ func (this *ObjectMeta) String() string { `Annotations:` + mapStringForAnnotations + `,`, `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `ZZZ_DeprecatedClusterName:` + fmt.Sprintf("%v", this.ZZZ_DeprecatedClusterName) + `,`, `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") @@ -8260,6 +8266,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendInitialEvents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SendInitialEvents = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9212,38 +9239,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ZZZ_DeprecatedClusterName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ZZZ_DeprecatedClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index b6c773515..a2cd8015f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -162,17 +162,18 @@ message ApplyOptions { // Condition contains details for one aspect of the current state of this API Resource. // --- // This struct is intended for direct use as an array at the field path .status.conditions. For example, -// type FooStatus struct{ -// // Represents the observations of a foo's current state. -// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" -// // +patchMergeKey=type -// // +patchStrategy=merge -// // +listType=map -// // +listMapKey=type -// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // -// // other fields -// } +// type FooStatus struct{ +// // Represents the observations of a foo's current state. 
+// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +// // +patchMergeKey=type +// // +patchStrategy=merge +// // +listType=map +// // +listMapKey=type +// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +// +// // other fields +// } message Condition { // type of condition in CamelCase or in foo.example.com/CamelCase. // --- @@ -245,19 +246,16 @@ message CreateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -427,8 +425,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. @@ -574,6 +570,32 @@ message ListOptions { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
+ // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + optional bool sendInitialEvents = 11; } // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource @@ -644,7 +666,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional optional string name = 1; @@ -670,7 +692,7 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional optional string namespace = 3; @@ -684,7 +706,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 5; @@ -748,14 +770,14 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional map annotations = 12; @@ -785,15 +807,6 @@ message ObjectMeta { // +patchStrategy=merge repeated string finalizers = 14; - // Deprecated: ClusterName is a legacy field that was always cleared by - // the system and never used; it will be removed completely in 1.25. - // - // The name in the go struct is changed to help clients detect - // accidental use. - // - // +optional - optional string clusterName = 15; - // ManagedFields maps workflow-id and version to the set of fields // that are managed by that workflow. This is mostly for internal // housekeeping, and users typically shouldn't need to set or @@ -819,11 +832,11 @@ message OwnerReference { optional string kind = 1; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids optional string uid = 4; // If true, this reference points to the managing controller. 
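As an aside, here is a minimal, hypothetical Go sketch of how a caller might opt in to the sendInitialEvents behaviour documented in the hunk above. It only constructs the vendored metav1.ListOptions brought in by this bump; the program name, values, and printing are illustrative and not part of this patch.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sendInitialEvents := true
	opts := metav1.ListOptions{
		Watch:               true,
		AllowWatchBookmarks: true, // the synthetic "Bookmark" event marks the end of the initial events
		// New optional field 11 added by this dependency bump.
		SendInitialEvents: &sendInitialEvents,
		// Required whenever sendInitialEvents is set, per the comment above.
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		// ResourceVersion left empty: interpreted as a "consistent read".
	}
	fmt.Printf("%+v\n", opts)
}

Note that the key byte 0x58 written by the generated Marshal code earlier in this diff is simply field number 11 shifted left by three bits with varint wire type 0 (11<<3 | 0 = 0x58), which is why the optional bool is encoded as a single varint.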
@@ -897,19 +910,16 @@ message PatchOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -1032,7 +1042,7 @@ message StatusDetails { // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 6; @@ -1136,19 +1146,16 @@ message UpdateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. 
The error returned from the server diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 2b6a30b65..592dcb8a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -58,7 +58,7 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { case LabelSelectorOpDoesNotExist: op = selection.DoesNotExist default: - return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + return nil, fmt.Errorf("%q is not a valid label selector operator", expr.Operator) } r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 1ea90de1e..92d3ed5e0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -59,8 +59,6 @@ type Object interface { SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference SetOwnerReferences([]OwnerReference) - GetZZZ_DeprecatedClusterName() string - SetZZZ_DeprecatedClusterName(clusterName string) GetManagedFields() []ManagedFieldsEntry SetManagedFields(managedFields []ManagedFieldsEntry) } @@ -172,10 +170,6 @@ func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return m func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { meta.OwnerReferences = references } -func (meta *ObjectMeta) GetZZZ_DeprecatedClusterName() string { return meta.ZZZ_DeprecatedClusterName } -func (meta *ObjectMeta) SetZZZ_DeprecatedClusterName(clusterName string) { - meta.ZZZ_DeprecatedClusterName = clusterName -} func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { meta.ManagedFields = managedFields diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 6dd6d8999..ab68181e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -27,9 +27,12 @@ func (m *MicroTime) ProtoMicroTime() *Timestamp { if m == nil { return &Timestamp{} } + + // truncate precision to microseconds to match JSON marshaling/unmarshaling + truncatedNanoseconds := time.Duration(m.Time.Nanosecond()).Truncate(time.Microsecond) return &Timestamp{ Seconds: m.Time.Unix(), - Nanos: int32(m.Time.Nanosecond()), + Nanos: int32(truncatedNanoseconds), } } @@ -51,7 +54,10 @@ func (m *MicroTime) Unmarshal(data []byte) error { if err := p.Unmarshal(data); err != nil { return err } - m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + + // truncate precision to microseconds to match JSON marshaling/unmarshaling + truncatedNanoseconds := time.Duration(p.Nanos).Truncate(time.Microsecond) + m.Time = time.Unix(p.Seconds, int64(truncatedNanoseconds)).Local() return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index eb071d410..8a8ff7018 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -17,10 +17,11 @@ limitations under the License. // Package v1 contains API types that are common to all versions. 
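A small, standard-library-only sketch (the sample nanosecond value is made up) of the microsecond truncation that the MicroTime proto round-trip above now performs, so that proto marshaling agrees with the microsecond-precision JSON form:

package main

import (
	"fmt"
	"time"
)

func main() {
	nanos := int32(123456789) // nanosecond component of a timestamp, finer than MicroTime can represent
	truncated := time.Duration(nanos).Truncate(time.Microsecond)
	// Prints 123456000: sub-microsecond precision is dropped, matching what the
	// RFC3339-with-microseconds JSON representation can carry.
	fmt.Println(int32(truncated))
}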
// // The package contains two categories of types: -// - external (serialized) types that lack their own version (e.g TypeMeta) -// - internal (never-serialized) types that are needed by several different -// api groups, and so live here, to avoid duplication and/or import loops -// (e.g. LabelSelector). +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// // In the future, we will probably move these categories of objects into // separate packages. package v1 @@ -113,7 +114,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -139,7 +140,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` @@ -153,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` @@ -217,14 +218,14 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` @@ -254,14 +255,9 @@ type ObjectMeta struct { // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` - // Deprecated: ClusterName is a legacy field that was always cleared by - // the system and never used; it will be removed completely in 1.25. - // - // The name in the go struct is changed to help clients detect - // accidental use. - // - // +optional - ZZZ_DeprecatedClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` + // Tombstone: ClusterName was a legacy field that was always cleared by + // the system and never used. 
+ // ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` // ManagedFields maps workflow-id and version to the set of fields // that are managed by that workflow. This is mostly for internal @@ -299,10 +295,10 @@ type OwnerReference struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional @@ -404,6 +400,32 @@ type ListOptions struct { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch @@ -546,19 +568,16 @@ type CreateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. 
Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -601,19 +620,16 @@ type PatchOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -678,19 +694,16 @@ type UpdateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. 
This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -765,7 +778,7 @@ type StatusDetails struct { Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason @@ -982,6 +995,24 @@ const ( // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) // values that can not be handled (e.g. an enumerated string). CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + CauseTypeForbidden CauseType = "FieldValueForbidden" + // CauseTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + CauseTypeTooLong CauseType = "FieldValueTooLong" + // CauseTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + CauseTypeTooMany CauseType = "FieldValueTooMany" + // CauseTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + CauseTypeInternal CauseType = "InternalError" + // CauseTypeTypeInvalid is for the value did not match the schema type for that field + CauseTypeTypeInvalid CauseType = "FieldValueTypeInvalid" // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. @@ -1194,9 +1225,7 @@ type LabelSelector struct { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` @@ -1453,17 +1482,18 @@ type PartialObjectMetadataList struct { // Condition contains details for one aspect of the current state of this API Resource. // --- // This struct is intended for direct use as an array at the field path .status.conditions. For example, -// type FooStatus struct{ -// // Represents the observations of a foo's current state. 
-// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" -// // +patchMergeKey=type -// // +patchStrategy=merge -// // +listType=map -// // +listMapKey=type -// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // -// // other fields -// } +// type FooStatus struct{ +// // Represents the observations of a foo's current state. +// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +// // +patchMergeKey=type +// // +patchStrategy=merge +// // +listType=map +// // +listMapKey=type +// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +// +// // other fields +// } type Condition struct { // type of condition in CamelCase or in foo.example.com/CamelCase. // --- diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index d002b03c8..b736e8371 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ @@ -115,7 +115,7 @@ var map_CreateOptions = map[string]string{ "": "CreateOptions may be provided when creating an API object.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -216,6 +216,7 @@ var map_ListOptions = map[string]string{ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "sendInitialEvents": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -239,21 +240,20 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", - "clusterName": "Deprecated: ClusterName is a legacy field that was always cleared by the system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect accidental use.", "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } @@ -265,8 +265,8 @@ var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "controller": "If true, this reference points to the managing controller.", "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", } @@ -307,7 +307,7 @@ var map_PatchOptions = map[string]string{ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -373,7 +373,7 @@ var map_StatusDetails = map[string]string{ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. 
Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } @@ -452,7 +452,7 @@ var map_UpdateOptions = map[string]string{ "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index f0fad90fd..40d289f37 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -101,6 +101,11 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } +func (obj *Unstructured) EachListItemWithAlloc(fn func(runtime.Object) error) error { + // EachListItem has allocated a new Object for the user, we can use it directly. + return obj.EachListItem(fn) +} + func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { return make(map[string]interface{}) @@ -444,18 +449,6 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { u.setNestedStringSlice(finalizers, "metadata", "finalizers") } -func (u *Unstructured) GetZZZ_DeprecatedClusterName() string { - return getNestedString(u.Object, "metadata", "clusterName") -} - -func (u *Unstructured) SetZZZ_DeprecatedClusterName(clusterName string) { - if len(clusterName) == 0 { - RemoveNestedField(u.Object, "metadata", "clusterName") - return - } - u.setNestedField(clusterName, "metadata", "clusterName") -} - func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { items, found, err := NestedSlice(u.Object, "metadata", "managedFields") if !found || err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5fb5..82beda2a2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,15 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +func (u *UnstructuredList) EachListItemWithAlloc(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&Unstructured{Object: u.Items[i].Object}); err != nil { + return err + } + } + return nil +} + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go new file mode 100644 index 000000000..a0f709ad8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -0,0 +1,320 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
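The hunks above add `EachListItemWithAlloc` to both `Unstructured` and `UnstructuredList`. A minimal sketch of how a consumer of the bumped apimachinery might call it; the sample list contents are made up for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	list := &unstructured.UnstructuredList{
		Items: []unstructured.Unstructured{
			{Object: map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"}},
			{Object: map[string]interface{}{"apiVersion": "v1", "kind": "Secret"}},
		},
	}

	// EachListItemWithAlloc hands a fresh *Unstructured wrapper to fn for each item,
	// so retaining the objects does not pin the whole Items slice in memory.
	var kept []runtime.Object
	_ = list.EachListItemWithAlloc(func(o runtime.Object) error {
		kept = append(kept, o)
		return nil
	})
	fmt.Println(len(kept)) // 2
}
```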
+*/ + +package validation + +import ( + "fmt" + "regexp" + "unicode" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options +type LabelSelectorValidationOptions struct { + // Allow invalid label value in selector + AllowInvalidLabelValueInSelector bool +} + +// LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. +// This is useful for determining whether AllowInvalidLabelValueInSelector should be set to true when validating an update +// based on existing persisted invalid values. +func LabelSelectorHasInvalidLabelValue(ps *metav1.LabelSelector) bool { + if ps == nil { + return false + } + for _, e := range ps.MatchExpressions { + for _, v := range e.Values { + if len(validation.IsValidLabelValue(v)) > 0 { + return true + } + } + } + return false +} + +// ValidateLabelSelector validate the LabelSelector according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. +func ValidateLabelSelector(ps *metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, opts, fldPath.Child("matchExpressions").Index(i))...) + } + return allErrs +} + +// ValidateLabelSelectorRequirement validate the requirement according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. +func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch sr.Operator { + case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn: + if len(sr.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist: + if len(sr.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + if !opts.AllowInvalidLabelValueInSelector { + for valueIndex, value := range sr.Values { + for _, msg := range validation.IsValidLabelValue(value) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(valueIndex), value, msg)) + } + } + } + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. 
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) + for _, msg := range validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) + } + } + return allErrs +} + +func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { + allErrs := field.ErrorList{} + //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed + if options.OrphanDependents != nil && options.PropagationPolicy != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set")) + } + if options.PropagationPolicy != nil && + *options.PropagationPolicy != metav1.DeletePropagationForeground && + *options.PropagationPolicy != metav1.DeletePropagationBackground && + *options.PropagationPolicy != metav1.DeletePropagationOrphan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) + } + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + return allErrs +} + +func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs +} + +func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs +} + +func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { + allErrs := field.ErrorList{} + if patchType != types.ApplyPatchType { + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } + } else { + if options.FieldManager == "" { + // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. + allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) + } + } + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) 
+ return allErrs +} + +var FieldManagerMaxLength = 128 + +// ValidateFieldManager valides that the fieldManager is the proper length and +// only has printable characters. +func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // the field can not be set as a `*string`, so a empty string ("") is + // considered as not set and is defaulted by the rest of the process + // (unless apply is used, in which case it is required). + if len(fieldManager) > FieldManagerMaxLength { + allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + } + // Verify that all characters are printable. + for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs +} + +var allowedDryRunValues = sets.NewString(metav1.DryRunAll) + +// ValidateDryRun validates that a dryRun query param only contains allowed values. +func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedDryRunValues.HasAll(dryRun...) { + allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) + } + return allErrs +} + +var allowedFieldValidationValues = sets.NewString("", metav1.FieldValidationIgnore, metav1.FieldValidationWarn, metav1.FieldValidationStrict) + +// ValidateFieldValidation validates that a fieldValidation query param only contains allowed values. +func ValidateFieldValidation(fldPath *field.Path, fieldValidation string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedFieldValidationValues.Has(fieldValidation) { + allErrs = append(allErrs, field.NotSupported(fldPath, fieldValidation, allowedFieldValidationValues.List())) + } + return allErrs + +} + +const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` + +// ValidateTableOptions returns any invalid flags on TableOptions. +func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} + +const MaxSubresourceNameLength = 256 + +func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, fields := range fieldsList { + fldPath := fldPath.Index(i) + switch fields.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) + } + if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) + } + allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) 
+ + if len(fields.Subresource) > MaxSubresourceNameLength { + allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength)) + } + } + return allErrs +} + +func ValidateConditions(conditions []metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + conditionTypeToFirstIndex := map[string]int{} + for i, condition := range conditions { + if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) + } else { + conditionTypeToFirstIndex[condition.Type] = i + } + + allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) + } + + return allErrs +} + +// validConditionStatuses is used internally to check validity and provide a good message +var validConditionStatuses = sets.NewString(string(metav1.ConditionTrue), string(metav1.ConditionFalse), string(metav1.ConditionUnknown)) + +const ( + maxReasonLen = 1 * 1024 + maxMessageLen = 32 * 1024 +) + +func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // type is set and is a valid format + allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) + + // status is set and is an accepted value + if !validConditionStatuses.Has(string(condition.Status)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) + } + + if condition.ObservedGeneration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) + } + + if condition.LastTransitionTime.IsZero() { + allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) + } + + if len(condition.Reason) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) + } else { + for _, currErr := range isValidConditionReason(condition.Reason) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) + } + if len(condition.Reason) > maxReasonLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + } + } + + if len(condition.Message) > maxMessageLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + } + + return allErrs +} + +const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" +const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" + +var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") + +// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. 
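For context on the newly vendored `k8s.io/apimachinery/pkg/apis/meta/v1/validation` package above, here is a hedged sketch of calling its label-selector helper; the selector values and field path are illustrative only:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "demo"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend"}},
		},
	}
	opts := metav1validation.LabelSelectorValidationOptions{
		// Only set to true when tolerating already-persisted invalid label values.
		AllowInvalidLabelValueInSelector: false,
	}
	errs := metav1validation.ValidateLabelSelector(sel, opts, field.NewPath("spec", "selector"))
	fmt.Println(len(errs)) // 0 for a well-formed selector
}
```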
+func isValidConditionReason(value string) []string { + if !conditionReasonRegexp.MatchString(value) { + return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index b7590f0b3..afe01ed5a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -426,6 +426,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, } else { out.Continue = "" } + if values, ok := map[string][]string(*in)["sendInitialEvents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.SendInitialEvents, s); err != nil { + return err + } + } else { + out.SendInitialEvents = nil + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 418e6099f..7d29c504a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -602,6 +602,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e9..dff735dcf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index d9b577227..76b76247c 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -115,10 +115,10 @@ type ConversionFuncs struct { // previously defined functions. 
func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { tA, tB := reflect.TypeOf(a), reflect.TypeOf(b) - if tA.Kind() != reflect.Ptr { + if tA.Kind() != reflect.Pointer { return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a) } - if tB.Kind() != reflect.Ptr { + if tB.Kind() != reflect.Pointer { return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b) } c.untyped[typePair{tA, tB}] = fn @@ -179,10 +179,10 @@ func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { typeFrom := reflect.TypeOf(from) typeTo := reflect.TypeOf(to) - if typeFrom.Kind() != reflect.Ptr { + if typeFrom.Kind() != reflect.Pointer { return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom) } - if typeTo.Kind() != reflect.Ptr { + if typeTo.Kind() != reflect.Pointer { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go index f21abe1e5..25b2923f2 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -34,3 +34,14 @@ func EqualitiesOrDie(funcs ...interface{}) Equalities { } return e } + +// Performs a shallow copy of the equalities map +func (e Equalities) Copy() Equalities { + result := Equalities{reflect.Equalities{}} + + for key, value := range e.Equalities { + result.Equalities[key] = value + } + + return result +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go index 4ebc1ebc5..7fadd27a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -26,7 +26,7 @@ import ( // Returns an error if this is not possible. func EnforcePtr(obj interface{}) (reflect.Value, error) { v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { if v.Kind() == reflect.Invalid { return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 2f0dd0074..b0a9246d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -55,7 +55,7 @@ func jsonTag(field reflect.StructField) (string, bool) { } func isPointerKind(kind reflect.Kind) bool { - return kind == reflect.Ptr + return kind == reflect.Pointer } func isStructKind(kind reflect.Kind) bool { @@ -139,7 +139,7 @@ func Convert(obj interface{}) (url.Values, error) { } var sv reflect.Value switch reflect.TypeOf(obj).Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: sv = reflect.ValueOf(obj).Elem() default: return nil, fmt.Errorf("expecting a pointer or interface") diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842b..19d823cef 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. 
// According to our measurements this is significantly faster // in codepaths that matter at high scale. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 2434429b9..5e6014240 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -74,9 +74,12 @@ type Selector interface { RequiresExactMatch(label string) (value string, found bool) } +// Sharing this saves 1 alloc per use; this is safe because it's immutable. +var sharedEverythingSelector Selector = internalSelector{} + // Everything returns a selector that matches all labels. func Everything() Selector { - return internalSelector{} + return sharedEverythingSelector } type nothingSelector struct{} @@ -91,9 +94,12 @@ func (n nothingSelector) RequiresExactMatch(label string) (value string, found b return "", false } +// Sharing this saves 1 alloc per use; this is safe because it's immutable. +var sharedNothingSelector Selector = nothingSelector{} + // Nothing returns a selector that matches no labels func Nothing() Selector { - return nothingSelector{} + return sharedNothingSelector } // NewSelector returns a nil selector @@ -143,13 +149,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -206,14 +211,14 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' // value for that key is in Requirement's value set. 
-// (3) The operator is NotIn, Labels has the Requirement's key and +// 3. The operator is NotIn, Labels has the Requirement's key and // Labels' value for that key is not in Requirement's value set. -// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// 4. The operator is DoesNotExist or NotIn and Labels does not have the // Requirement's key. -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has // the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { @@ -840,32 +845,32 @@ func (p *Parser) parseExactValue() (sets.String, error) { // as they parse different selectors with different syntaxes. // The input will cause an error if it does not follow this form: // -// ::= | "," -// ::= [!] KEY [ | ] -// ::= "" | -// ::= | -// ::= "notin" -// ::= "in" -// ::= "(" ")" -// ::= VALUE | VALUE "," -// ::= ["="|"=="|"!="] VALUE +// ::= | "," +// ::= [!] KEY [ | ] +// ::= "" | +// ::= | +// ::= "notin" +// ::= "in" +// ::= "(" ")" +// ::= VALUE | VALUE "," +// ::= ["="|"=="|"!="] VALUE // // KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. // VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. // Delimiter is white space: (' ', '\t') // Example of valid syntax: -// "x in (foo,,baz),y,z notin ()" // -// Note: -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// "x in (foo,,baz),y,z notin ()" // +// Note: +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -933,6 +938,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -954,3 +961,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. 
Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) +} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go b/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go index 0d00d8c3a..8bf22ae8a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go @@ -24,12 +24,14 @@ import ( // by caching created but unused items for later reuse, relieving pressure on the garbage collector. // // Usage: -// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator) -// defer runtime.AllocatorPool.Put(memoryAllocator) +// +// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator) +// defer runtime.AllocatorPool.Put(memoryAllocator) // // A note for future: -// consider introducing multiple pools for storing buffers of different sizes -// perhaps this could allow us to be more efficient. +// +// consider introducing multiple pools for storing buffers of different sizes +// perhaps this could allow us to be more efficient. 
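The new `ValidatedSetSelector` above wraps an already-validated `labels.Set` without the copy that `SelectorFromValidatedSet` performs. A minimal usage sketch; the label keys and values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The Set must already be valid and must not be mutated afterwards.
	set := labels.Set{"app": "demo", "tier": "frontend"}

	// Plain type conversion: wraps the Set instead of copying it.
	sel := labels.ValidatedSetSelector(set)

	podLabels := labels.Set{"app": "demo", "tier": "frontend", "extra": "x"}
	fmt.Println(sel.Matches(podLabels)) // true
	fmt.Println(sel.String())           // "app=demo,tier=frontend" (keys sorted)
}
```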
var AllocatorPool = sync.Pool{ New: func() interface{} { return &Allocator{} @@ -58,7 +60,7 @@ func (a *Allocator) Allocate(n uint64) []byte { } // grow the buffer size := uint64(2*cap(a.buf)) + n - a.buf = make([]byte, size, size) + a.buf = make([]byte, size) a.buf = a.buf[:n] return a.buf } @@ -70,5 +72,5 @@ type SimpleAllocator struct{} var _ MemoryAllocator = &SimpleAllocator{} func (sa *SimpleAllocator) Allocate(n uint64) []byte { - return make([]byte, n, n) + return make([]byte, n) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index a92863139..73f85286c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -45,7 +45,6 @@ func NewCodec(e Encoder, d Decoder) Codec { // Encode is a convenience wrapper for encoding to a []byte from an Encoder func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer buf := &bytes.Buffer{} if err := e.Encode(obj, buf); err != nil { return nil, err @@ -344,14 +343,15 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi // Incoming kinds that match the provided groupKinds are preferred. // Kind may be empty in the provided group kind, in which case any kind will match. // Examples: -// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar -// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) // -// gv=mygroup/__internal, groupKinds=mygroup, anothergroup -// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) // -// gv=mygroup/__internal, groupKinds=mygroup, anothergroup -// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go index 000228061..e88400776 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -30,7 +30,7 @@ import ( // TODO: verify that the correct external version is chosen on encode... 
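The pool usage pattern is already spelled out in the reflowed comment above; a short sketch of the same pattern, with an arbitrary allocation size, might look like:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Borrow a reusable allocator from the pool and return it when done.
	a := runtime.AllocatorPool.Get().(*runtime.Allocator)
	defer runtime.AllocatorPool.Put(a)

	buf := a.Allocate(64) // a []byte of length 64 backed by the allocator's buffer
	fmt.Println(len(buf)) // 64

	// A later Allocate may reuse the same backing array, so do not keep buf
	// around after returning the allocator to the pool.
}
```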
func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { if _, err := Encode(c, internalType); err != nil { - return fmt.Errorf("Internal type not encodable: %v", err) + return fmt.Errorf("internal type not encodable: %v", err) } for _, et := range externalTypes { typeMeta := TypeMeta{ diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index b640a9e76..62eb27afc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -231,13 +231,13 @@ func (c *fromUnstructuredContext) pushKey(key string) { } -// FromUnstructuredWIthValidation converts an object from map[string]interface{} representation into a concrete type. +// FromUnstructuredWithValidation converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. // It takes a validationDirective that indicates how to behave when it encounters unknown fields. func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) - if t.Kind() != reflect.Ptr || value.IsNil() { + if t.Kind() != reflect.Pointer || value.IsNil() { return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) } @@ -291,7 +291,7 @@ func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error st, dt := sv.Type(), dv.Type() switch dt.Kind() { - case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + case reflect.Map, reflect.Slice, reflect.Pointer, reflect.Struct, reflect.Interface: // Those require non-trivial conversion. default: // This should handle all simple types. 
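Most of the mechanical changes in this and the surrounding hunks swap `reflect.Ptr` for `reflect.Pointer`. Since Go 1.18, `reflect.Pointer` is the canonical name and `reflect.Ptr` remains an alias for it, so the substitution is behavior-preserving; a quick check:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.Ptr is declared as an alias constant equal to reflect.Pointer,
	// so the Ptr -> Pointer replacements in this patch do not change behavior.
	fmt.Println(reflect.Ptr == reflect.Pointer) // true

	v := reflect.ValueOf(&struct{}{})
	fmt.Println(v.Kind() == reflect.Pointer) // true
}
```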
@@ -353,7 +353,7 @@ func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error return mapFromUnstructured(sv, dv, ctx) case reflect.Slice: return sliceFromUnstructured(sv, dv, ctx) - case reflect.Ptr: + case reflect.Pointer: return pointerFromUnstructured(sv, dv, ctx) case reflect.Struct: return structFromUnstructured(sv, dv, ctx) @@ -465,7 +465,7 @@ func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) e } dv.SetBytes(data) } else { - dv.Set(reflect.Zero(dt)) + dv.Set(reflect.MakeSlice(dt, 0, 0)) } return nil } @@ -496,13 +496,13 @@ func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) e func pointerFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() - if st.Kind() == reflect.Ptr && sv.IsNil() { + if st.Kind() == reflect.Pointer && sv.IsNil() { dv.Set(reflect.Zero(dt)) return nil } dv.Set(reflect.New(dt.Elem())) switch st.Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: return fromUnstructured(sv.Elem(), dv.Elem(), ctx) default: return fromUnstructured(sv, dv.Elem(), ctx) @@ -579,7 +579,7 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte } else { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) - if t.Kind() != reflect.Ptr || value.IsNil() { + if t.Kind() != reflect.Pointer || value.IsNil() { return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) } u = map[string]interface{}{} @@ -686,7 +686,7 @@ func toUnstructured(sv, dv reflect.Value) error { return mapToUnstructured(sv, dv) case reflect.Slice: return sliceToUnstructured(sv, dv) - case reflect.Ptr: + case reflect.Pointer: return pointerToUnstructured(sv, dv) case reflect.Struct: return structToUnstructured(sv, dv) @@ -790,7 +790,7 @@ func isZero(v reflect.Value) bool { case reflect.Map, reflect.Slice: // TODO: It seems that 0-len maps are ignored in it. return v.IsNil() || v.Len() == 0 - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: return v.IsNil() } return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index de634e2c6..5f06cc574 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -31,32 +31,37 @@ option go_package = "k8s.io/apimachinery/pkg/runtime"; // various plugin types. 
// // // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } +// +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } // // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. @@ -78,10 +83,12 @@ message RawExtension { // TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, // like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } +// +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind // // TypeMeta is provided here for convenience. You may use it directly from this package or define diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 710a97795..e89ea8939 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -365,4 +365,9 @@ type Unstructured interface { // error should terminate the iteration. If IsList() returns false, this method should return an error // instead of calling the provided function. EachListItem(func(Object) error) error + // EachListItemWithAlloc works like EachListItem, but avoids retaining references to a slice of items. + // It does this by making a shallow copy of non-pointer items before passing them to fn. + // + // If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. + EachListItemWithAlloc(func(Object) error) error } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 994a3e3fa..d1c37c942 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -39,7 +39,7 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { // ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` // and parses it out into both possibilities. 
This code takes no responsibility for knowing which representation was intended // but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. +// `*GroupVersionKind` is nil. // `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { var gvk *GroupVersionKind @@ -191,7 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -239,7 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index a9d656f8e..a5b116718 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,7 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) @@ -141,7 +141,7 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { s.addObservedVersion(gv) for _, obj := range types { t := reflect.TypeOf(obj) - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { panic("All types must be pointers to structs.") } t = t.Elem() @@ -159,7 +159,7 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { if len(gvk.Version) == 0 { panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) } - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { panic("All types must be pointers to structs.") } t = t.Elem() @@ -462,7 +462,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } else { // determine the incoming kinds with as few allocations as possible. 
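The doc fix above clarifies that `ParseKindArg` returns a nil `*GroupVersionKind` when the input has only two segments. A sketch with made-up arguments:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Three segments: both interpretations are returned.
	gvk, gk := schema.ParseKindArg("Deployment.v1.apps")
	fmt.Println(gvk) // apps/v1, Kind=Deployment
	fmt.Println(gk)  // Deployment.v1.apps (group=v1.apps, kind=Deployment)

	// Two segments: only the GroupKind form is possible, so *GroupVersionKind is nil.
	gvk, gk = schema.ParseKindArg("Deployment.apps")
	fmt.Println(gvk == nil) // true
	fmt.Println(gk)         // Deployment.apps
}
```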
t = reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { return nil, fmt.Errorf("only pointer types may be converted: %v", t) } t = t.Elem() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 9de35e791..ff9820842 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,7 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// All other callers will be forced to request a Codec directly. +// All other callers will be forced to request a Codec directly. func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 446633182..25f955ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -147,7 +147,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{Decoder: c.decoder}); err != nil { if strictErr, ok := runtime.AsStrictDecodingError(err); ok { // save the strictDecodingError let and the caller decide what to do with it strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/splice.go b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go new file mode 100644 index 000000000..2badb7b97 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "io" +) + +// Splice is the interface that wraps the Splice method. +// +// Splice moves data from given slice without copying the underlying data for +// efficiency purpose. Therefore, the caller should make sure the underlying +// data is not changed later. +type Splice interface { + Splice([]byte) + io.Writer + Reset() + Bytes() []byte +} + +// A spliceBuffer implements Splice and io.Writer interfaces. +type spliceBuffer struct { + raw []byte + buf *bytes.Buffer +} + +func NewSpliceBuffer() Splice { + return &spliceBuffer{} +} + +// Splice implements the Splice interface. 
+func (sb *spliceBuffer) Splice(raw []byte) { + sb.raw = raw +} + +// Write implements the io.Writer interface. +func (sb *spliceBuffer) Write(p []byte) (n int, err error) { + if sb.buf == nil { + sb.buf = &bytes.Buffer{} + } + return sb.buf.Write(p) +} + +// Reset resets the buffer to be empty. +func (sb *spliceBuffer) Reset() { + if sb.buf != nil { + sb.buf.Reset() + } + sb.raw = nil +} + +// Bytes returns the data held by the buffer. +func (sb *spliceBuffer) Bytes() []byte { + if sb.buf != nil && len(sb.buf.Bytes()) > 0 { + return sb.buf.Bytes() + } + if sb.raw != nil { + return sb.raw + } + return []byte{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 31359f35f..ce77c7910 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -21,10 +21,12 @@ package runtime // TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, // like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } +// +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind // // TypeMeta is provided here for convenience. You may use it directly from this package or define @@ -53,32 +55,37 @@ const ( // various plugin types. // // // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } +// +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } // // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. @@ -116,7 +123,7 @@ type Unknown struct { // Raw will hold the complete serialized object which couldn't be matched // with a registered type. Most likely, nothing should be done with this // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,2,opt,name=raw"` // ContentEncoding is encoding used to encode 'Raw' data. // Unspecified means no encoding. 
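The new `Splice`/`spliceBuffer` above adopts a caller-provided byte slice without copying while still accepting writes through `io.Writer`. A minimal sketch; the payload contents are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	sb := runtime.NewSpliceBuffer()

	// Splice adopts the slice without copying; the caller must not modify it afterwards.
	payload := []byte(`{"kind":"Status"}`)
	sb.Splice(payload)
	fmt.Println(string(sb.Bytes())) // {"kind":"Status"}

	// Writing through io.Writer falls back to an internal bytes.Buffer, and
	// Bytes() prefers written data over previously spliced data.
	sb.Reset()
	fmt.Fprint(sb, "written bytes")
	fmt.Println(string(sb.Bytes())) // written bytes
}
```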
ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index b19750f3a..db18ce1ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -37,3 +37,14 @@ const ( func (n NamespacedName) String() string { return n.Namespace + string(Separator) + n.Name } + +// MarshalLog emits a struct containing required key/value pair +func (n NamespacedName) MarshalLog() interface{} { + return struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + }{ + Name: n.Name, + Namespace: n.Namespace, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go index fee348d7e..cff9ca671 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/nodename.go +++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go @@ -23,21 +23,21 @@ package types // // To clarify the various types: // -// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. -// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// - Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. // -// * Hostname is the hostname of the local machine (from uname -n). -// However, some components allow the user to pass in a --hostname-override flag, -// which will override this in most places. In the absence of anything more meaningful, -// kubelet will use Hostname as the Node.Name when it creates the Node. +// - Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. // // * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. // -// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the -// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up -// to the cloudprovider how to do this mapping. +// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. // -// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the -// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if -// we are using a custom DHCP domain it won't be. +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. 
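`NamespacedName.MarshalLog`, added above, lets structured (logr-style) loggers emit name and namespace as separate fields instead of the concatenated string. A sketch that simply marshals the returned struct; the namespace and name are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	nn := types.NamespacedName{Namespace: "default", Name: "my-config"}

	// String() keeps the compact "namespace/name" form.
	fmt.Println(nn.String()) // default/my-config

	// MarshalLog exposes the parts as individual fields for structured logging.
	b, _ := json.Marshal(nn.MarshalLog())
	fmt.Println(string(b)) // {"name":"my-config","namespace":"default"}
}
```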
type NodeName string diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 0d2f153bf..1396274c7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -40,6 +40,13 @@ func NewExpiringWithClock(clock clock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { + // AllowExpiredGet causes the expiration check to be skipped on Get. + // It should only be used when a key always corresponds to the exact same value. + // Thus when this field is true, expired keys are considered valid + // until the next call to Set (which causes the GC to run). + // It may not be changed concurrently with calls to Get. + AllowExpiredGet bool + clock clock.Clock // mu protects the below fields @@ -70,7 +77,10 @@ func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { c.mu.RLock() defer c.mu.RUnlock() e, ok := c.cache[key] - if !ok || !c.clock.Now().Before(e.expiry) { + if !ok { + return nil, false + } + if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) { return nil, false } return e.val, true diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index fa9ffa51b..fc0301844 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -23,34 +23,20 @@ import ( "strings" "text/tabwriter" - "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/dump" ) -// StringDiff diffs a and b and returns a human readable diff. -func StringDiff(a, b string) string { - ba := []byte(a) - bb := []byte(b) - out := []byte{} - i := 0 - for ; i < len(ba) && i < len(bb); i++ { - if ba[i] != bb[i] { - break - } - out = append(out, ba[i]) - } - out = append(out, []byte("\n\nA: ")...) - out = append(out, ba[i:]...) - out = append(out, []byte("\n\nB: ")...) - out = append(out, bb[i:]...) - out = append(out, []byte("\n\n")...) - return string(out) -} - func legacyDiff(a, b interface{}) string { return cmp.Diff(a, b) } +// StringDiff diffs a and b and returns a human readable diff. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func StringDiff(a, b string) string { + return legacyDiff(a, b) +} + // ObjectDiff prints the diff of two go objects and fails if the objects // contain unhandled unexported fields. // DEPRECATED: use github.com/google/go-cmp/cmp.Diff @@ -75,13 +61,8 @@ func ObjectReflectDiff(a, b interface{}) string { // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, // enabling easy visual scanning for mismatches. func ObjectGoPrintSideBySide(a, b interface{}) string { - s := spew.ConfigState{ - Indent: " ", - // Extra deep spew. - DisableMethods: true, - } - sA := s.Sdump(a) - sB := s.Sdump(b) + sA := dump.Pretty(a) + sB := dump.Pretty(b) linesA := strings.Split(sA, "\n") linesB := strings.Split(sB, "\n") @@ -135,7 +116,7 @@ func IgnoreUnset() cmp.Option { if v2.Len() == 0 { return true } - case reflect.Interface, reflect.Ptr: + case reflect.Interface, reflect.Pointer: if v2.IsNil() { return true } diff --git a/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go new file mode 100644 index 000000000..cf61ef76a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dump + +import ( + "github.com/davecgh/go-spew/spew" +) + +var prettyPrintConfig = &spew.ConfigState{ + Indent: " ", + DisableMethods: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// The config MUST NOT be changed because that could change the result of a hash operation +var prettyPrintConfigForHash = &spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// Pretty wrap the spew.Sdump with Indent, and disabled methods like error() and String() +// The output may change over time, so for guaranteed output please take more direct control +func Pretty(a interface{}) string { + return prettyPrintConfig.Sdump(a) +} + +// ForHash keeps the original Spew.Sprintf format to ensure the same checksum +func ForHash(a interface{}) string { + return prettyPrintConfigForHash.Sprintf("%#v", a) +} + +// OneLine outputs the object in one line +func OneLine(a interface{}) string { + return prettyPrintConfig.Sprintf("%#v", a) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 1f5a04fd4..1b60d145c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { return NewAggregate(result) } -// Reduce will return err or, if err is an Aggregate and only has one item, +// Reduce will return err or nil, if err is an Aggregate and only has one item, // the first item in the aggregate. func Reduce(err error) error { if agg, ok := err.(Aggregate); ok && err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 10df0d99c..9b3c9c8d5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { return &lengthDelimitedFrameWriter{w: w} } -// Write writes a single frame to the nested writer, prepending it with the length in +// Write writes a single frame to the nested writer, prepending it with the length // in bytes of data (as a 4 byte, bigendian uint32). func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) @@ -56,10 +56,10 @@ type lengthDelimitedFrameReader struct { // // The protocol is: // -// stream: message ... -// message: prefix body -// prefix: 4 byte uint32 in BigEndian order, denotes length of body -// body: bytes (0..prefix) +// stream: message ... +// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) // // If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead // will be returned along with the number of bytes read. 
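For illustration, a minimal sketch of the length-delimited framing described above, assuming a throwaway in-memory buffer and payload: it writes one frame through NewLengthDelimitedFrameWriter and then decodes the 4-byte big-endian length prefix by hand.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer
	// Each Write emits a 4-byte big-endian length prefix followed by the payload.
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	if _, err := w.Write([]byte(`{"kind":"Status"}`)); err != nil {
		panic(err)
	}

	raw := buf.Bytes()
	n := binary.BigEndian.Uint32(raw[:4]) // body length taken from the prefix
	fmt.Println(n, string(raw[4:4+n]))    // 17 {"kind":"Status"}
}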
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go new file mode 100644 index 000000000..5893df5bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httpstream adds multiplexed streaming support to HTTP requests and +// responses via connection upgrades. +package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream" diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go new file mode 100644 index 000000000..32f075782 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -0,0 +1,159 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httpstream + +import ( + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + HeaderConnection = "Connection" + HeaderUpgrade = "Upgrade" + HeaderProtocolVersion = "X-Stream-Protocol-Version" + HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions" +) + +// NewStreamHandler defines a function that is called when a new Stream is +// received. If no error is returned, the Stream is accepted; otherwise, +// the stream is rejected. After the reply frame has been sent, replySent is closed. +type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error + +// NoOpNewStreamHandler is a stream handler that accepts a new stream and +// performs no other logic. +func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil } + +// Dialer knows how to open a streaming connection to a server. +type Dialer interface { + + // Dial opens a streaming connection to a server using one of the protocols + // specified (in order of most preferred to least preferred). + Dial(protocols ...string) (Connection, string, error) +} + +// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade +// HTTP requests to support multiplexed bidirectional streams. After RoundTrip() +// is invoked, if the upgrade is successful, clients may retrieve the upgraded +// connection by calling UpgradeRoundTripper.Connection(). +type UpgradeRoundTripper interface { + http.RoundTripper + // NewConnection validates the response and creates a new Connection. 
+ NewConnection(resp *http.Response) (Connection, error) +} + +// ResponseUpgrader knows how to upgrade HTTP requests and responses to +// add streaming support to them. +type ResponseUpgrader interface { + // UpgradeResponse upgrades an HTTP response to one that supports multiplexed + // streams. newStreamHandler will be called asynchronously whenever the + // other end of the upgraded connection creates a new stream. + UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection +} + +// Connection represents an upgraded HTTP connection. +type Connection interface { + // CreateStream creates a new Stream with the supplied headers. + CreateStream(headers http.Header) (Stream, error) + // Close resets all streams and closes the connection. + Close() error + // CloseChan returns a channel that is closed when the underlying connection is closed. + CloseChan() <-chan bool + // SetIdleTimeout sets the amount of time the connection may remain idle before + // it is automatically closed. + SetIdleTimeout(timeout time.Duration) + // RemoveStreams can be used to remove a set of streams from the Connection. + RemoveStreams(streams ...Stream) +} + +// Stream represents a bidirectional communications channel that is part of an +// upgraded connection. +type Stream interface { + io.ReadWriteCloser + // Reset closes both directions of the stream, indicating that neither client + // or server can use it any more. + Reset() error + // Headers returns the headers used to create the stream. + Headers() http.Header + // Identifier returns the stream's ID. + Identifier() uint32 +} + +// IsUpgradeRequest returns true if the given request is a connection upgrade request +func IsUpgradeRequest(req *http.Request) bool { + for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { + if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) { + return true + } + } + return false +} + +func negotiateProtocol(clientProtocols, serverProtocols []string) string { + for i := range clientProtocols { + for j := range serverProtocols { + if clientProtocols[i] == serverProtocols[j] { + return clientProtocols[i] + } + } + } + return "" +} + +func commaSeparatedHeaderValues(header []string) []string { + var parsedClientProtocols []string + for i := range header { + for _, clientProtocol := range strings.Split(header[i], ",") { + if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 { + parsedClientProtocols = append(parsedClientProtocols, proto) + } + } + } + return parsedClientProtocols +} + +// Handshake performs a subprotocol negotiation. If the client did request a +// subprotocol, Handshake will select the first common value found in +// serverProtocols. If a match is found, Handshake adds a response header +// indicating the chosen subprotocol. If no match is found, HTTP forbidden is +// returned, along with a response header containing the list of protocols the +// server can accept. 
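For orientation, a minimal hypothetical server handler driving the negotiation performed by the Handshake function defined next; the protocol names and listen address are illustrative only, not values defined by this package.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

func serveStreams(w http.ResponseWriter, req *http.Request) {
	// On failure, Handshake has already replied with 403 Forbidden and the
	// list of protocols this server accepts.
	protocol, err := httpstream.Handshake(req, w, []string{"example.k8s.io/v2", "example.k8s.io/v1"})
	if err != nil {
		return
	}
	_ = protocol // choose behavior based on the negotiated version, then upgrade the response
}

func main() {
	_ = http.ListenAndServe(":8080", http.HandlerFunc(serveStreams))
}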
+func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) { + clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]) + if len(clientProtocols) == 0 { + return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion) + } + + if len(serverProtocols) == 0 { + panic(fmt.Errorf("unable to upgrade: serverProtocols is required")) + } + + negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) + if len(negotiatedProtocol) == 0 { + for i := range serverProtocols { + w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) + } + err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + http.Error(w, err.Error(), http.StatusForbidden) + return "", err + } + + w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) + return negotiatedProtocol, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 000000000..d4ceab84f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,204 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/moby/spdystream" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog/v2" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams map[uint32]httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler + ping func() (time.Duration, error) +} + +// NewClientConnection creates a new SPDY client connection. +func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + return NewClientConnectionWithPings(conn, 0) +} + +// NewClientConnectionWithPings creates a new SPDY client connection. +// +// If pingPeriod is non-zero, a background goroutine will send periodic Ping +// frames to the server. Use this to keep idle connections through certain load +// balancers alive longer. +func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + return NewServerConnectionWithPings(conn, newStreamHandler, 0) +} + +// NewServerConnectionWithPings creates a new SPDY server connection. 
+// newStreamHandler will be invoked when the server receives a newly created +// stream from the client. +// +// If pingPeriod is non-zero, a background goroutine will send periodic Ping +// frames to the server. Use this to keep idle connections through certain load +// balancers alive longer. +func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection { + c := &connection{ + conn: conn, + newStreamHandler: newStreamHandler, + ping: pingFn, + streams: make(map[uint32]httpstream.Stream), + } + go conn.Serve(c.newSpdyStream) + if pingPeriod > 0 && pingFn != nil { + go c.sendPings(pingPeriod) + } + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 30 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. +func (c *connection) Close() error { + c.streamLock.Lock() + for _, s := range c.streams { + // calling Reset instead of Close ensures that all streams are fully torn down + s.Reset() + } + c.streams = make(map[uint32]httpstream.Stream, 0) + c.streamLock.Unlock() + + // now that all streams are fully torn down, it's safe to call close on the underlying connection, + // which should be able to terminate immediately at this point, instead of waiting for any + // remaining graceful stream termination. + return c.conn.Close() +} + +// RemoveStreams can be used to removes a set of streams from the Connection. +func (c *connection) RemoveStreams(streams ...httpstream.Stream) { + c.streamLock.Lock() + for _, stream := range streams { + // It may be possible that the provided stream is nil if timed out. + if stream != nil { + delete(c.streams, stream.Identifier()) + } + } + c.streamLock.Unlock() +} + +// CreateStream creates a new stream with the specified headers and registers +// it with the connection. +func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { + stream, err := c.conn.CreateStream(headers, nil, false) + if err != nil { + return nil, err + } + if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil { + return nil, err + } + + c.registerStream(stream) + return stream, nil +} + +// registerStream adds the stream s to the connection's list of streams that +// it owns. +func (c *connection) registerStream(s httpstream.Stream) { + c.streamLock.Lock() + c.streams[s.Identifier()] = s + c.streamLock.Unlock() +} + +// CloseChan returns a channel that, when closed, indicates that the underlying +// spdystream.Connection has been closed. +func (c *connection) CloseChan() <-chan bool { + return c.conn.CloseChan() +} + +// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve. 
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject +// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the +// stream is accepted and registered with the connection. +func (c *connection) newSpdyStream(stream *spdystream.Stream) { + replySent := make(chan struct{}) + err := c.newStreamHandler(stream, replySent) + rejectStream := (err != nil) + if rejectStream { + klog.Warningf("Stream rejected: %v", err) + stream.Reset() + return + } + + c.registerStream(stream) + stream.SendReply(http.Header{}, rejectStream) + close(replySent) +} + +// SetIdleTimeout sets the amount of time the connection may remain idle before +// it is automatically closed. +func (c *connection) SetIdleTimeout(timeout time.Duration) { + c.conn.SetIdleTimeout(timeout) +} + +func (c *connection) sendPings(period time.Duration) { + t := time.NewTicker(period) + defer t.Stop() + for { + select { + case <-c.conn.CloseChan(): + return + case <-t.C: + } + if _, err := c.ping(); err != nil { + klog.V(3).Infof("SPDY Ping failed: %v", err) + // Continue, in case this is a transient failure. + // c.conn.CloseChan above will tell us when the connection is + // actually closed. + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go new file mode 100644 index 000000000..7fe52ee56 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -0,0 +1,370 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "context" + "crypto/tls" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "golang.org/x/net/proxy" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" +) + +// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports +// multiplexed streams. After RoundTrip() is invoked, Conn will be set +// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface. +type SpdyRoundTripper struct { + //tlsConfig holds the TLS configuration settings to use when connecting + //to the remote server. + tlsConfig *tls.Config + + /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper + must be safe for use by multiple concurrent goroutines. If this is absolutely + necessary, we could keep a map from http.Request to net.Conn. In practice, + a client will create an http.Client, set the transport to a new insteace of + SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue. + */ + // conn is the underlying network connection to the remote server. 
+ conn net.Conn + + // Dialer is the dialer used to connect. Used if non-nil. + Dialer *net.Dialer + + // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment + // Used primarily for mocking the proxy discovery in tests. + proxier func(req *http.Request) (*url.URL, error) + + // pingPeriod is a period for sending Ping frames over established + // connections. + pingPeriod time.Duration +} + +var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} +var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{} +var _ utilnet.Dialer = &SpdyRoundTripper{} + +// NewRoundTripper creates a new SpdyRoundTripper that will use the specified +// tlsConfig. +func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper { + return NewRoundTripperWithConfig(RoundTripperConfig{ + TLS: tlsConfig, + }) +} + +// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the +// specified tlsConfig and proxy func. +func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper { + return NewRoundTripperWithConfig(RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxier, + }) +} + +// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified +// configuration. +func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper { + if cfg.Proxier == nil { + cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + return &SpdyRoundTripper{ + tlsConfig: cfg.TLS, + proxier: cfg.Proxier, + pingPeriod: cfg.PingPeriod, + } +} + +// RoundTripperConfig is a set of options for an SpdyRoundTripper. +type RoundTripperConfig struct { + // TLS configuration used by the round tripper. + TLS *tls.Config + // Proxier is a proxy function invoked on each request. Optional. + Proxier func(*http.Request) (*url.URL, error) + // PingPeriod is a period for sending SPDY Pings on the connection. + // Optional. + PingPeriod time.Duration +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during +// proxying with a spdy roundtripper. +func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { + return s.tlsConfig +} + +// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. +func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { + conn, err := s.dial(req) + if err != nil { + return nil, err + } + + if err := req.Write(conn); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +// dial dials the host specified by req, using TLS if appropriate, optionally +// using a proxy server if one is configured via environment variables. +func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { + proxyURL, err := s.proxier(req) + if err != nil { + return nil, err + } + + if proxyURL == nil { + return s.dialWithoutProxy(req.Context(), req.URL) + } + + switch proxyURL.Scheme { + case "socks5": + return s.dialWithSocks5Proxy(req, proxyURL) + case "https", "http", "": + return s.dialWithHttpProxy(req, proxyURL) + } + + return nil, fmt.Errorf("proxy URL scheme not supported: %s", proxyURL.Scheme) +} + +// dialWithHttpProxy dials the host specified by url through an http or an https proxy. 
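Before the proxy-dialing helpers, a minimal sketch of constructing the round tripper through RoundTripperConfig; the TLS settings and ping period are illustrative choices, not defaults of this package.

package main

import (
	"crypto/tls"
	"time"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	rt := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{
		TLS:        &tls.Config{MinVersion: tls.VersionTLS12},
		PingPeriod: 30 * time.Second, // keep idle connections alive through load balancers
	})
	// rt satisfies httpstream.UpgradeRoundTripper: send an upgrade request with
	// rt.RoundTrip, then call rt.NewConnection on the 101 response.
	_ = rt
}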
+func (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) { + // ensure we use a canonical host with proxyReq + targetHost := netutil.CanonicalAddr(req.URL) + + // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support + proxyReq := http.Request{ + Method: "CONNECT", + URL: &url.URL{}, + Host: targetHost, + } + + proxyReq = *proxyReq.WithContext(req.Context()) + + if pa := s.proxyAuth(proxyURL); pa != "" { + proxyReq.Header = http.Header{} + proxyReq.Header.Set("Proxy-Authorization", pa) + } + + proxyDialConn, err := s.dialWithoutProxy(proxyReq.Context(), proxyURL) + if err != nil { + return nil, err + } + + //nolint:staticcheck // SA1019 ignore deprecated httputil.NewProxyClientConn + proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil) + response, err := proxyClientConn.Do(&proxyReq) + //nolint:staticcheck // SA1019 ignore deprecated httputil.ErrPersistEOF: it might be + // returned from the invocation of proxyClientConn.Do + if err != nil && err != httputil.ErrPersistEOF { + return nil, err + } + if response != nil && response.StatusCode >= 300 || response.StatusCode < 200 { + return nil, fmt.Errorf("CONNECT request to %s returned response: %s", proxyURL.Redacted(), response.Status) + } + + rwc, _ := proxyClientConn.Hijack() + + if req.URL.Scheme == "https" { + return s.tlsConn(proxyReq.Context(), rwc, targetHost) + } + return rwc, nil +} + +// dialWithSocks5Proxy dials the host specified by url through a socks5 proxy. +func (s *SpdyRoundTripper) dialWithSocks5Proxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) { + // ensure we use a canonical host with proxyReq + targetHost := netutil.CanonicalAddr(req.URL) + proxyDialAddr := netutil.CanonicalAddr(proxyURL) + + var auth *proxy.Auth + if proxyURL.User != nil { + pass, _ := proxyURL.User.Password() + auth = &proxy.Auth{ + User: proxyURL.User.Username(), + Password: pass, + } + } + + dialer := s.Dialer + if dialer == nil { + dialer = &net.Dialer{ + Timeout: 30 * time.Second, + } + } + + proxyDialer, err := proxy.SOCKS5("tcp", proxyDialAddr, auth, dialer) + if err != nil { + return nil, err + } + + // According to the implementation of proxy.SOCKS5, the type assertion will always succeed + contextDialer, ok := proxyDialer.(proxy.ContextDialer) + if !ok { + return nil, errors.New("SOCKS5 Dialer must implement ContextDialer") + } + + proxyDialConn, err := contextDialer.DialContext(req.Context(), "tcp", targetHost) + if err != nil { + return nil, err + } + + if req.URL.Scheme == "https" { + return s.tlsConn(req.Context(), proxyDialConn, targetHost) + } + return proxyDialConn, nil +} + +// tlsConn returns a TLS client side connection using rwc as the underlying transport. +func (s *SpdyRoundTripper) tlsConn(ctx context.Context, rwc net.Conn, targetHost string) (net.Conn, error) { + + host, _, err := net.SplitHostPort(targetHost) + if err != nil { + return nil, err + } + + tlsConfig := s.tlsConfig + switch { + case tlsConfig == nil: + tlsConfig = &tls.Config{ServerName: host} + case len(tlsConfig.ServerName) == 0: + tlsConfig = tlsConfig.Clone() + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(rwc, tlsConfig) + + if err := tlsConn.HandshakeContext(ctx); err != nil { + tlsConn.Close() + return nil, err + } + + return tlsConn, nil +} + +// dialWithoutProxy dials the host specified by url, using TLS if appropriate. 
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + dialer := s.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + + if url.Scheme == "http" { + return dialer.DialContext(ctx, "tcp", dialAddr) + } + + tlsDialer := tls.Dialer{ + NetDialer: dialer, + Config: s.tlsConfig, + } + return tlsDialer.DialContext(ctx, "tcp", dialAddr) +} + +// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header +func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string { + if proxyURL == nil || proxyURL.User == nil { + return "" + } + username := proxyURL.User.Username() + password, _ := proxyURL.User.Password() + auth := username + ":" + password + return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) +} + +// RoundTrip executes the Request and upgrades it. After a successful upgrade, +// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded +// connection. +func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = utilnet.CloneRequest(req) + req.Header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + req.Header.Add(httpstream.HeaderUpgrade, HeaderSpdy31) + + conn, err := s.Dial(req) + if err != nil { + return nil, err + } + + responseReader := bufio.NewReader(conn) + + resp, err := http.ReadResponse(responseReader, nil) + if err != nil { + conn.Close() + return nil, err + } + + s.conn = conn + + return resp, nil +} + +// NewConnection validates the upgrade response, creating and returning a new +// httpstream.Connection if there were no errors. +func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade)) + if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + defer resp.Body.Close() + responseError := "" + responseErrorBytes, err := io.ReadAll(resp.Body) + if err != nil { + responseError = "unable to read error from server response" + } else { + // TODO: I don't belong here, I should be abstracted from this class + if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil { + if status, ok := obj.(*metav1.Status); ok { + return nil, &apierrors.StatusError{ErrStatus: *status} + } + } + responseError = string(responseErrorBytes) + responseError = strings.TrimSpace(responseError) + } + + return nil, fmt.Errorf("unable to upgrade connection: %s", responseError) + } + + return NewClientConnectionWithPings(s.conn, s.pingPeriod) +} + +// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection +var statusScheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. 
+var statusCodecs = serializer.NewCodecFactory(statusScheme) + +func init() { + statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion, + &metav1.Status{}, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go new file mode 100644 index 000000000..d30ae2fa3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/runtime" +) + +const HeaderSpdy31 = "SPDY/3.1" + +// responseUpgrader knows how to upgrade HTTP responses. It +// implements the httpstream.ResponseUpgrader interface. +type responseUpgrader struct { + pingPeriod time.Duration +} + +// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All +// calls will be handled directly by the underlying net.Conn with the exception +// of Read and Close calls, which will consider data in the bufio.Reader. This +// ensures that data already inside the used bufio.Reader instance is also +// read. +type connWrapper struct { + net.Conn + closed int32 + bufReader *bufio.Reader +} + +func (w *connWrapper) Read(b []byte) (n int, err error) { + if atomic.LoadInt32(&w.closed) == 1 { + return 0, io.EOF + } + return w.bufReader.Read(b) +} + +func (w *connWrapper) Close() error { + err := w.Conn.Close() + atomic.StoreInt32(&w.closed, 1) + return err +} + +// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is +// capable of upgrading HTTP responses using SPDY/3.1 via the +// spdystream package. +func NewResponseUpgrader() httpstream.ResponseUpgrader { + return NewResponseUpgraderWithPings(0) +} + +// NewResponseUpgraderWithPings returns a new httpstream.ResponseUpgrader that +// is capable of upgrading HTTP responses using SPDY/3.1 via the spdystream +// package. +// +// If pingPeriod is non-zero, for each incoming connection a background +// goroutine will send periodic Ping frames to the server. Use this to keep +// idle connections through certain load balancers alive longer. +func NewResponseUpgraderWithPings(pingPeriod time.Duration) httpstream.ResponseUpgrader { + return responseUpgrader{pingPeriod: pingPeriod} +} + +// UpgradeResponse upgrades an HTTP response to one that supports multiplexed +// streams. newStreamHandler will be called synchronously whenever the +// other end of the upgraded connection creates a new stream. 
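A minimal hypothetical server-side use of the UpgradeResponse method implemented next; the listen address and the accept-everything stream handler are illustrative.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	upgrader := spdy.NewResponseUpgrader()
	handler := func(w http.ResponseWriter, req *http.Request) {
		conn := upgrader.UpgradeResponse(w, req, func(s httpstream.Stream, replySent <-chan struct{}) error {
			return nil // accept every stream; a real handler would inspect s.Headers()
		})
		if conn == nil {
			return // UpgradeResponse already wrote an error response
		}
		defer conn.Close()
		<-conn.CloseChan() // block until the connection is closed
	}
	_ = http.ListenAndServe(":8080", http.HandlerFunc(handler))
}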
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header) + http.Error(w, errorMsg, http.StatusBadRequest) + return nil + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + errorMsg := "unable to upgrade: unable to hijack response" + http.Error(w, errorMsg, http.StatusInternalServerError) + return nil + } + + w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31) + w.WriteHeader(http.StatusSwitchingProtocols) + + conn, bufrw, err := hijacker.Hijack() + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err)) + return nil + } + + connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader} + spdyConn, err := NewServerConnectionWithPings(connWithBuf, newStreamHandler, u.pingPeriod) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err)) + return nil + } + + return spdyConn +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index c27380c19..0ea88156b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -54,7 +54,7 @@ const ( // FromInt creates an IntOrString object with an int32 value. It is // your responsibility not to call this method with a value greater // than int32. -// TODO: convert to (val int32) +// Deprecated: use FromInt32 instead. func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) @@ -62,6 +62,11 @@ func FromInt(val int) IntOrString { return IntOrString{Type: Int, IntVal: int32(val)} } +// FromInt32 creates an IntOrString object with an int32 value. +func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + // FromString creates an IntOrString object with a string value. func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} @@ -145,7 +150,7 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS // GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. // This method returns a scaled value from an IntOrString type. If the IntOrString // is a percentage string value it's treated as a percentage and scaled appropriately -// in accordance to the total, if it's an int value it's treated as a a simple value and +// in accordance to the total, if it's an int value it's treated as a simple value and // if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. 
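As a quick illustration of the helpers touched in this hunk, a sketch combining the new FromInt32 with GetScaledValueFromIntOrPercent (defined just below); the values are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	count := intstr.FromInt32(3)        // preferred over the deprecated FromInt
	percent := intstr.FromString("25%")

	scaled, err := intstr.GetScaledValueFromIntOrPercent(&percent, 10, true) // round up
	if err != nil {
		panic(err)
	}
	fmt.Println(count.IntValue(), scaled) // 3 3  (25% of 10, rounded up)
}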
func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { if intOrPercent == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml new file mode 100644 index 000000000..a667e9834 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0166 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0230 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0290 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0291 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0292 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0293 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0294 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0295 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0296 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0297 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0298 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0299 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0300 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0301 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0302 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0303 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0304 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0305 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0306 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0307 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0308 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0309 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0310 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0311 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0312 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0313 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0314 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0315 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0316 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0317 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0318 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0319 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0320 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0321 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0322 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0323 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0324 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0325 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0326 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0327 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0328 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0329 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0330 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0331 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0332 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0333 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0334 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0335 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0336 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0337 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0338 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0339 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0340 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0341 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0342 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0343 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0344 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0345 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0346 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0347 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0348 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0349 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0350 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0351 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0352 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0353 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0354 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0355 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0356 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0357 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0358 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0359 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0360 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0361 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0362 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0363 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0364 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0365 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0366 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0367 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0368 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0369 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0370 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0371 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0372 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0373 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0374 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0375 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0376 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0377 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0378 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0379 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0380 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0381 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0382 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0383 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0384 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0385 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0386 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0387 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0388 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0389 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0390 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0391 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0392 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0393 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0394 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0395 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0396 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0397 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0398 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0399 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0400 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0401 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0402 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0403 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0404 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0405 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0406 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0407 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0408 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0409 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0410 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0411 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0412 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0413 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0414 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0415 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0416 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0417 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0418 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0419 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0420 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0421 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0422 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0423 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0424 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0425 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0426 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0427 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0428 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0429 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0430 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0431 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0432 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0433 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0434 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0435 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0436 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0437 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0438 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0439 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0440 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0441 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0442 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0443 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555
+    - ip: 10.0.0.1
+      targetRef:
+        kind: Pod
+        name: pod-name-1234-0444
+        namespace: default
+        resourceVersion: '1234567890'
+        uid: 11111111-2222-3333-4444-555555555555
+    - ip: 10.0.0.1
+      targetRef:
+        kind: Pod
+        name: pod-name-1234-0445
+        namespace: default
+        resourceVersion: '1234567890'
+        uid: 11111111-2222-3333-4444-555555555555
[... identical address entries continue for pod-name-1234-0446 through pod-name-1234-0848, each differing only in the sequential pod name ...]
+    - ip: 10.0.0.1
+      targetRef:
kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go index 792badbc3..d2ce66c1b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go @@ -45,7 +45,7 @@ import ( // and their field paths and types are exactly the same, then ExtractInto can be // called with the root resource as the object and the subresource as the // applyConfiguration. This works for "status", obviously, because status is -// represented by the exact same object as the root resource. This this does NOT +// represented by the exact same object as the root resource. This does NOT // work, for example, with the "scale" subresources of Deployment, ReplicaSet and // StatefulSet. While the spec.replicas, status.replicas fields are in the same // exact field path locations as they are in autoscaling.Scale, the selector diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go new file mode 100644 index 000000000..978ffb3c3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager = internal.FieldManager + +// NewDefaultFieldManager creates a new FieldManager that merges apply requests +// and update managed fields for other types of requests. +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { + f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil +} + +// NewDefaultCRDFieldManager creates a new FieldManager specifically for +// CRDs. 
This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) { + f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil +} + +func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error { + _, err := internal.DecodeManagedFields(encodedManagedFields) + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go new file mode 100644 index 000000000..b75ef7416 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + "time" +) + +// AtMostEvery will never run the method more than once every specified +// duration. +type AtMostEvery struct { + delay time.Duration + lastCall time.Time + mutex sync.Mutex +} + +// NewAtMostEvery creates a new AtMostEvery, that will run the method at +// most every given duration. +func NewAtMostEvery(delay time.Duration) *AtMostEvery { + return &AtMostEvery{ + delay: delay, + } +} + +// updateLastCall returns true if the lastCall time has been updated, +// false if it was too early. +func (s *AtMostEvery) updateLastCall() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if time.Since(s.lastCall) < s.delay { + return false + } + s.lastCall = time.Now() + return true +} + +// Do will run the method if enough time has passed, and return true. +// Otherwise, it does nothing and returns false. +func (s *AtMostEvery) Do(fn func()) bool { + if !s.updateLastCall() { + return false + } + fn() + return true +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go new file mode 100644 index 000000000..fa342ca13 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
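// A hypothetical usage sketch of the new public managedfields helpers vendored
// above (ValidateManagedFields and friends). Names and field values are made up;
// the only assumption is that k8s.io/apimachinery is on the module path, as it is
// in this repository.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/managedfields"
)

func main() {
	// A single, well-formed managedFields entry, shaped the way the API server
	// stores it in object metadata.
	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "example-controller",
		Operation:  metav1.ManagedFieldsOperationApply,
		APIVersion: "v1",
		FieldsType: "FieldsV1",
		FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
	}}

	// ValidateManagedFields decodes every entry and reports any that are malformed.
	if err := managedfields.ValidateManagedFields(entries); err != nil {
		fmt.Println("invalid managedFields:", err)
		return
	}
	fmt.Println("managedFields entries decode cleanly")
}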
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion + subresource string +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + subresource: subresource, + } +} + +// Update implements Manager. +func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + Subresource: f.subresource, + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return BuildManagerIdentifier(&managerInfo) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go new file mode 100644 index 000000000..8951932ba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
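// The AtMostEvery helper vendored just above throttles a callback to at most one
// run per delay. Because it lives in an internal package, the sketch below is a
// hypothetical, standalone re-implementation of the same pattern using only the
// standard library, for illustration.
package main

import (
	"fmt"
	"sync"
	"time"
)

// throttle mirrors the AtMostEvery idea: remember the last run and skip calls
// that arrive before the delay has elapsed.
type throttle struct {
	mu    sync.Mutex
	delay time.Duration
	last  time.Time
}

// do runs fn and returns true only if enough time has passed since the last run.
func (t *throttle) do(fn func()) bool {
	t.mu.Lock()
	if time.Since(t.last) < t.delay {
		t.mu.Unlock()
		return false
	}
	t.last = time.Now()
	t.mu.Unlock()
	fn()
	return true
}

func main() {
	t := &throttle{delay: time.Second}
	for i := 0; i < 3; i++ {
		ran := t.do(func() { fmt.Println("throttled work ran") })
		fmt.Println("iteration", i, "ran:", ran) // only the first iteration runs
	}
}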
+*/ + +package internal + +import ( + "fmt" + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. +func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. +func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) { + // Gather all entries from updates + updaters := []string{} + for manager, fields := range managed.Fields() { + if !fields.Applied() { + updaters = append(updaters, manager) + } + } + if len(updaters) <= f.maxUpdateManagers { + return managed, nil + } + + // If we have more than the maximum, sort the update entries by time, oldest first. + sort.Slice(updaters, func(i, j int) bool { + iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0) + if iTime != nil { + iSeconds = iTime.Unix() + } + if jTime != nil { + jSeconds = jTime.Unix() + } + if iSeconds != jSeconds { + return iSeconds < jSeconds + } + return updaters[i] < updaters[j] + }) + + // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap + versionToFirstManager := map[string]string{} + for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ { + manager := updaters[i] + vs := managed.Fields()[manager] + time := managed.Times()[manager] + version := string(vs.APIVersion()) + + // Create a new manager identifier for the versioned bucket entry. + // The version for this manager comes from the version of the update being merged into the bucket. + bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + Manager: f.oldUpdatesManagerName, + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: version, + }) + if err != nil { + return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err) + } + + // Merge the fieldets if this is not the first time the version was seen. 
+ // Otherwise just record the manager name in versionToFirstManager + if first, ok := versionToFirstManager[version]; ok { + // If the bucket doesn't exists yet, create one. + if _, ok := managed.Fields()[bucket]; !ok { + s := managed.Fields()[first] + delete(managed.Fields(), first) + managed.Fields()[bucket] = s + } + + managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied()) + delete(managed.Fields(), manager) + length-- + + // Use the time from the update being merged into the bucket, since it is more recent. + managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go new file mode 100644 index 000000000..8c044c915 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. 
+ sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + managerStr := fmt.Sprintf("%q", encodedManager.Manager) + if encodedManager.Subresource != "" { + managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion) + } + return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return managerStr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go new file mode 100644 index 000000000..eca04a711 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go @@ -0,0 +1,209 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = NewAtMostEvery(time.Second) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager struct { + fieldManager Manager + subresource string +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. 
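// printManager above treats each manager key as a JSON-encoded
// metav1.ManagedFieldsEntry. The sketch below shows that round trip with an
// illustrative identifier; the real encoding is produced by
// BuildManagerIdentifier, which is not part of this hunk.
package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical manager identifier of the shape printManager expects.
	identifier := `{"manager":"example-controller","operation":"Apply","apiVersion":"apps/v1"}`

	entry := &metav1.ManagedFieldsEntry{}
	if err := json.Unmarshal([]byte(identifier), entry); err != nil {
		// printManager falls back to quoting the raw string when decoding fails.
		fmt.Printf("%q\n", identifier)
		return
	}
	fmt.Printf("manager %q, operation %s, apiVersion %s\n", entry.Manager, entry.Operation, entry.APIVersion)
}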
+func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewVersionCheckManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion(), + ), + ), kind, + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. 
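// As the body below shows, such failures are logged at most once per second via
// the atMostEverySecond throttle, and managedFields are stripped from the new
// object so the request itself still succeeds.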
+func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. + managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go new file mode 100644 index 000000000..08186191a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
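// isResetManagedFields above treats a non-nil empty list, or a list containing a
// single empty entry, as an explicit request to clear managedFields. Since the
// function is internal, here is a hypothetical standalone sketch of the same
// check for illustration.
package main

import (
	"fmt"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isReset mirrors isResetManagedFields: nil means "untouched", while an empty
// non-nil list or a single empty entry means "please reset managedFields".
func isReset(managedFields []metav1.ManagedFieldsEntry) bool {
	if len(managedFields) == 0 {
		return managedFields != nil
	}
	if len(managedFields) == 1 {
		return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
	}
	return false
}

func main() {
	fmt.Println(isReset(nil))                                         // false
	fmt.Println(isReset([]metav1.ManagedFieldsEntry{}))               // true
	fmt.Println(isReset([]metav1.ManagedFieldsEntry{{}}))             // true
	fmt.Println(isReset([]metav1.ManagedFieldsEntry{{Manager: "x"}})) // false
}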
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go new file mode 100644 index 000000000..b00b6b829 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +// +// This is a copy of the corev1 annotation since we don't want to depend on the whole package. +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" + +// SetLastApplied sets the last-applied annotation the given value in +// the object. +func SetLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[LastAppliedConfigAnnotation] = value + if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { + delete(annotations, LastAppliedConfigAnnotation) + } + accessor.SetAnnotations(annotations) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go new file mode 100644 index 000000000..3f6cf8821 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go @@ -0,0 +1,171 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
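// FieldsToSet and SetToFields above convert between the FieldsV1 trie stored in
// metadata.managedFields and a fieldpath.Set. The sketch below shows the same
// round trip using the public fieldpath API these helpers call (FromJSON and
// ToJSON); the raw value is illustrative only.
package main

import (
	"bytes"
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	// A FieldsV1-style trie, as it would appear in managedFields.
	raw := []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)

	// FieldsToSet boils down to Set.FromJSON on the raw trie.
	set := fieldpath.NewSet()
	if err := set.FromJSON(bytes.NewReader(raw)); err != nil {
		fmt.Println("decode error:", err)
		return
	}

	// SetToFields is the inverse: Set.ToJSON back into a raw trie.
	out, err := set.ToJSON()
	if err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Println(string(out))
}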
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go new file mode 100644 index 000000000..06e6c5d8c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = SetLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + lastApplied, ok := annotations[LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go new file mode 100644 index 000000000..9b4c20326 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. 
+func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed := managedStruct{} + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + return nil, fmt.Errorf("operation must be `Apply` or `Update`") + } + if len(encodedVersionedSet.APIVersion) < 1 { + return nil, fmt.Errorf("apiVersion must not be empty") + } + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return &managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. 
+ if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + + if p.APIVersion != q.APIVersion { + return p.APIVersion < q.APIVersion + } + return p.Subresource < q.Subresource + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go 
b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go new file mode 100644 index 000000000..376eed6b2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. +func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } else { + object = liveObj.DeepCopyObject() + RemoveObjectManagedFields(object) + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go new file mode 100644 index 000000000..053936103 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go new file mode 100644 index 000000000..1954d65d3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go new file mode 100644 index 000000000..f24c040ed --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + gvk := appliedObj.GetObjectKind().GroupVersionKind() + emptyObj, err := f.objectCreater.New(gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go new file mode 100644 index 000000000..9b61f3a6f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go new file mode 100644 index 000000000..2112c9ab7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -0,0 +1,186 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoredFields: resetFields, + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoredFields: resetFields, + }, + }, nil +} + +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + +// Update implements Manager. 
+func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) + } + managed = NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil") + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go new file mode 100644 index 000000000..1ac96d7f7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/validation/spec" + smdschema "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser map[schema.GroupVersionKind]*typed.ParseableType +} + +var _ TypeConverter = &typeConverter{} + +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + + typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}} + tr := indexModels(&typeParser, openapiSpec) + + return &typeConverter{parser: tr}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser[gvk] + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. 
+func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func indexModels( + typeParser *typed.Parser, + openAPISchemas map[string]*spec.Schema, +) map[schema.GroupVersionKind]*typed.ParseableType { + tr := map[schema.GroupVersionKind]*typed.ParseableType{} + for modelName, model := range openAPISchemas { + gvkList := parseGroupVersionKind(model.Extensions) + if len(gvkList) == 0 { + continue + } + + parsedType := typeParser.Type(modelName) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + tr[schema.GroupVersionKind(gvk)] = &parsedType + } + } + } + return tr +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind { + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions["x-kubernetes-group-version-kind"] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + var group, version, kind string + + // gvk extension list must be a map with group, version, and + // kind fields + if gvkMap, ok := gvk.(map[interface{}]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + + } else if gvkMap, ok := gvk.(map[string]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + } else { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go new file mode 100644 index 000000000..ee1e2bca7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type versionCheckManager struct { + fieldManager Manager + gvk schema.GroupVersionKind +} + +var _ Manager = &versionCheckManager{} + +// NewVersionCheckManager creates a manager that makes sure that the +// applied object is in the proper version. +func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager { + return &versionCheckManager{fieldManager: fieldManager, gvk: gvk} +} + +// Update implements Manager. +func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + // Nothing to do for updates, this is checked in many other places. + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk)) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go new file mode 100644 index 000000000..45855fa4c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. 
+func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. + fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. + return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml new file mode 100644 index 000000000..66e849f23 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -0,0 +1,261 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + topology.kubernetes.io/region: us-central1 + topology.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: 
node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. 
AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - registry.k8s.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - registry.k8s.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - registry.k8s.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - registry.k8s.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - registry.k8s.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - registry.k8s.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - registry.k8s.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - registry.k8s.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - registry.k8s.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - registry.k8s.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - 
registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - registry.k8s.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - registry.k8s.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - registry.k8s.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - registry.k8s.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml new file mode 100644 index 000000000..3fb0877d6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: 
default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go new file mode 100644 index 000000000..48b774cec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. 
+ if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go new file mode 100644 index 000000000..d031eefaa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter = internal.TypeConverter + +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() +} + +// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas. +// This will automatically find the proper version of the object, and the +// corresponding schema information. +// The keys to the map must be consistent with the names +// used by Refs within the schemas. 
+// The schemas should conform to the Kubernetes Structural Schema OpenAPI +// restrictions found in docs: +// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index 990fa0d43..25626cf3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/dump" "sigs.k8s.io/yaml" ) @@ -76,7 +76,7 @@ func ToYAMLOrError(v interface{}) string { func toYAML(v interface{}) (string, error) { y, err := yaml.Marshal(v) if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, dump.Pretty(v)) } return string(y), nil @@ -88,7 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 822416806..01d028e72 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -339,7 +339,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam for _, addr := range addrs { ip, _, err := netutils.ParseCIDRSloppy(addr.String()) if err != nil { - return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) + return nil, fmt.Errorf("unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go index c0fd4e20f..f54bb1e71 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -25,9 +25,9 @@ import ( var validSchemes = sets.NewString("http", "https", "") // SplitSchemeNamePort takes a string of the following forms: -// * "", returns "", "","", true -// * ":", returns "", "","",true -// * "::", returns "","","",true +// - "", returns "", "","", true +// - ":", returns "", "","",true +// - "::", returns "","","",true // // Name must be non-empty or valid will be returned false. 
// Scheme must be "http" or "https" if specified @@ -57,9 +57,10 @@ func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { } // JoinSchemeNamePort returns a string that specifies the scheme, name, and port: -// * "" -// * ":" -// * "::" +// - "" +// - ":" +// - "::" +// // None of the parameters may contain a ':' character // Name is required // Scheme must be "", "http", or "https" diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 5950087e0..1635e69a5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -20,11 +20,13 @@ import ( "errors" "net" "reflect" + "strings" "syscall" ) // IPNetEqual checks if the two input IPNets are representing the same subnet. // For example, +// // 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. // 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { @@ -46,6 +48,11 @@ func IsConnectionReset(err error) bool { return false } +// Returns if the given err is "http2: client connection lost" error. +func IsHTTP2ConnectionLost(err error) bool { + return err != nil && strings.Contains(err.Error(), "http2: client connection lost") +} + // Returns if the given err is "connection refused" error func IsConnectionRefused(err error) bool { var errno syscall.Errno diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go new file mode 100644 index 000000000..237ebaef4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DefaultStreamCreationTimeout = 30 * time.Second + + // The SPDY subprotocol "channel.k8s.io" is used for remote command + // attachment/execution. This represents the initial unversioned subprotocol, + // which has the known bugs https://issues.k8s.io/13394 and + // https://issues.k8s.io/13395. + StreamProtocolV1Name = "channel.k8s.io" + + // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command + // attachment/execution. It is the second version of the subprotocol and + // resolves the issues present in the first version. + StreamProtocolV2Name = "v2.channel.k8s.io" + + // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command + // attachment/execution. It is the third version of the subprotocol and + // adds support for resizing container terminals. + StreamProtocolV3Name = "v3.channel.k8s.io" + + // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command + // attachment/execution. It is the 4th version of the subprotocol and + // adds support for exit codes. 
+ StreamProtocolV4Name = "v4.channel.k8s.io" + + NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") + ExitCodeCauseType = metav1.CauseType("ExitCode") +) + +var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 9f834fa53..d738725ca 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -142,7 +142,7 @@ func GetCaller() string { runtime.Callers(3, pc[:]) f := runtime.FuncForPC(pc[0]) if f == nil { - return fmt.Sprintf("Unable to find caller") + return "Unable to find caller" } return f.Name() } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 9bfa85d43..4d7a17c3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +// Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[byte]{} +// s2 := New[byte]() type Byte map[byte]Empty // NewByte creates a Byte from a list of values. func NewByte(items ...byte) Byte { - ss := Byte{} - ss.Insert(items...) - return ss + return Byte(New[byte](items...)) } // ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func ByteKeySet(theMap interface{}) Byte { - v := reflect.ValueOf(theMap) - ret := Byte{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(byte)) - } - return ret +func ByteKeySet[T any](theMap map[byte]T) Byte { + return Byte(KeySet(theMap)) } // Insert adds items to the set. func (s Byte) Insert(items ...byte) Byte { - for _, item := range items { - s[item] = Empty{} - } - return s + return Byte(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Byte) Delete(items ...byte) Byte { - for _, item := range items { - delete(s, item) - } - return s + return Byte(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Byte) Has(item byte) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Byte) HasAll(items ...byte) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Byte) HasAny(items ...byte) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. 
+func (s Byte) Clone() Byte { + return Byte(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Byte) Difference(s2 Byte) Byte { - result := NewByte() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Byte) Difference(s2 Byte) Byte { + return Byte(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Byte) SymmetricDifference(s2 Byte) Byte { + return Byte(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Byte) Difference(s2 Byte) Byte { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Byte) Union(s2 Byte) Byte { - result := NewByte() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Byte(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Byte) Union(s2 Byte) Byte { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Byte) Intersection(s2 Byte) Byte { - var walk, other Byte - result := NewByte() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Byte(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Byte) IsSuperset(s2 Byte) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Byte) Equal(s2 Byte) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfByte []byte - -func (s sortableSliceOfByte) Len() int { return len(s) } -func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } -func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted byte slice. func (s Byte) List() []byte { - res := make(sortableSliceOfByte, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []byte(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Byte) UnsortedList() []byte { - res := make([]byte, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Byte) PopAny() (byte, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue byte - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. 
func (s Byte) Len() int { return len(s) } - -func lessByte(lhs, rhs byte) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index b152a0bf0..194883390 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - -// Package sets has auto-generated set types. +// Package sets has generic set and specified sets. Generic set will +// replace specified ones over time. And specific ones are deprecated. package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index e11e622c5..fbb1df06d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets // Empty is public since it is used by some internal API objects for conversions between external diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index 88bd70967..5876fc9de 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +// Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int]{} +// s2 := New[int]() type Int map[int]Empty // NewInt creates a Int from a list of values. func NewInt(items ...int) Int { - ss := Int{} - ss.Insert(items...) - return ss + return Int(New[int](items...)) } // IntKeySet creates a Int from a keys of a map[int](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func IntKeySet(theMap interface{}) Int { - v := reflect.ValueOf(theMap) - ret := Int{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int)) - } - return ret +func IntKeySet[T any](theMap map[int]T) Int { + return Int(KeySet(theMap)) } // Insert adds items to the set. func (s Int) Insert(items ...int) Int { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int(cast(s).Insert(items...)) } // Delete removes all items from the set. 
func (s Int) Delete(items ...int) Int { - for _, item := range items { - delete(s, item) - } - return s + return Int(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Int) Has(item int) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int) HasAll(items ...int) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int) HasAny(items ...int) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int) Clone() Int { + return Int(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int) Difference(s2 Int) Int { - result := NewInt() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int) Difference(s2 Int) Int { + return Int(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int) SymmetricDifference(s2 Int) Int { + return Int(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int) Difference(s2 Int) Int { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int) Union(s2 Int) Int { - result := NewInt() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int) Union(s2 Int) Int { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int) Intersection(s2 Int) Int { - var walk, other Int - result := NewInt() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int) IsSuperset(s2 Int) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Int) Equal(s2 Int) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt []int - -func (s sortableSliceOfInt) Len() int { return len(s) } -func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } -func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int slice. 
func (s Int) List() []int { - res := make(sortableSliceOfInt, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int) UnsortedList() []int { - res := make([]int, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int) PopAny() (int, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s Int) Len() int { return len(s) } - -func lessInt(lhs, rhs int) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go index 96a485554..2c640c5d0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int32]{} +// s2 := New[int32]() type Int32 map[int32]Empty // NewInt32 creates a Int32 from a list of values. func NewInt32(items ...int32) Int32 { - ss := Int32{} - ss.Insert(items...) - return ss + return Int32(New[int32](items...)) } // Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func Int32KeySet(theMap interface{}) Int32 { - v := reflect.ValueOf(theMap) - ret := Int32{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int32)) - } - return ret +func Int32KeySet[T any](theMap map[int32]T) Int32 { + return Int32(KeySet(theMap)) } // Insert adds items to the set. func (s Int32) Insert(items ...int32) Int32 { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int32(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Int32) Delete(items ...int32) Int32 { - for _, item := range items { - delete(s, item) - } - return s + return Int32(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Int32) Has(item int32) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int32) HasAll(items ...int32) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int32) HasAny(items ...int32) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) 
} -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int32) Clone() Int32 { + return Int32(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int32) Difference(s2 Int32) Int32 { - result := NewInt32() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int32) Difference(s2 Int32) Int32 { + return Int32(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int32) SymmetricDifference(s2 Int32) Int32 { + return Int32(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int32) Difference(s2 Int32) Int32 { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int32) Union(s2 Int32) Int32 { - result := NewInt32() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int32(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int32) Union(s2 Int32) Int32 { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int32) Intersection(s2 Int32) Int32 { - var walk, other Int32 - result := NewInt32() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int32(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int32) IsSuperset(s2 Int32) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Int32) Equal(s2 Int32) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt32 []int32 - -func (s sortableSliceOfInt32) Len() int { return len(s) } -func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } -func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int32 slice. func (s Int32) List() []int32 { - res := make(sortableSliceOfInt32, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int32(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int32) UnsortedList() []int32 { - res := make([]int32, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int32) PopAny() (int32, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int32 - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. 
func (s Int32) Len() int { return len(s) } - -func lessInt32(lhs, rhs int32) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index b375a1b06..bf3eb3ffa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +// Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int64]{} +// s2 := New[int64]() type Int64 map[int64]Empty // NewInt64 creates a Int64 from a list of values. func NewInt64(items ...int64) Int64 { - ss := Int64{} - ss.Insert(items...) - return ss + return Int64(New[int64](items...)) } // Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func Int64KeySet(theMap interface{}) Int64 { - v := reflect.ValueOf(theMap) - ret := Int64{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int64)) - } - return ret +func Int64KeySet[T any](theMap map[int64]T) Int64 { + return Int64(KeySet(theMap)) } // Insert adds items to the set. func (s Int64) Insert(items ...int64) Int64 { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int64(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Int64) Delete(items ...int64) Int64 { - for _, item := range items { - delete(s, item) - } - return s + return Int64(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Int64) Has(item int64) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int64) HasAll(items ...int64) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int64) HasAny(items ...int64) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int64) Clone() Int64 { + return Int64(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int64) Difference(s2 Int64) Int64 { - result := NewInt64() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int64) Difference(s2 Int64) Int64 { + return Int64(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. 
+// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int64) SymmetricDifference(s2 Int64) Int64 { + return Int64(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int64) Difference(s2 Int64) Int64 { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int64) Union(s2 Int64) Int64 { - result := NewInt64() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int64(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int64) Union(s2 Int64) Int64 { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int64) Intersection(s2 Int64) Int64 { - var walk, other Int64 - result := NewInt64() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int64(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int64) IsSuperset(s2 Int64) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Int64) Equal(s2 Int64) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt64 []int64 - -func (s sortableSliceOfInt64) Len() int { return len(s) } -func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } -func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int64 slice. func (s Int64) List() []int64 { - res := make(sortableSliceOfInt64, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int64(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int64) UnsortedList() []int64 { - res := make([]int64, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int64) PopAny() (int64, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int64 - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s Int64) Len() int { return len(s) } - -func lessInt64(lhs, rhs int64) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go b/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go new file mode 100644 index 000000000..443dac62e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type ordered interface { + integer | float | ~string +} + +// integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type integer interface { + signed | unsigned +} + +// float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type float interface { + ~float32 | ~float64 +} + +// signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go new file mode 100644 index 000000000..d50526f42 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -0,0 +1,241 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "sort" +) + +// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption. +type Set[T comparable] map[T]Empty + +// cast transforms specified set to generic Set[T]. +func cast[T comparable](s map[T]Empty) Set[T] { return s } + +// New creates a Set from a list of values. +// NOTE: type param must be explicitly instantiated if given items are empty. +func New[T comparable](items ...T) Set[T] { + ss := make(Set[T], len(items)) + ss.Insert(items...) + return ss +} + +// KeySet creates a Set from a keys of a map[comparable](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func KeySet[T comparable, V any](theMap map[T]V) Set[T] { + ret := Set[T]{} + for keyValue := range theMap { + ret.Insert(keyValue) + } + return ret +} + +// Insert adds items to the set. 
+func (s Set[T]) Insert(items ...T) Set[T] { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +func Insert[T comparable](set Set[T], items ...T) Set[T] { + return set.Insert(items...) +} + +// Delete removes all items from the set. +func (s Set[T]) Delete(items ...T) Set[T] { + for _, item := range items { + delete(s, item) + } + return s +} + +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN +// can't be cleared because NaN can't be removed. +// For sets containing items of a type that is reflexive for ==, +// this is optimized to a single call to runtime.mapclear(). +func (s Set[T]) Clear() Set[T] { + for key := range s { + delete(s, key) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Set[T]) Has(item T) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Set[T]) HasAll(items ...T) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Set[T]) HasAny(items ...T) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Clone returns a new set which is a copy of the current set. +func (s Set[T]) Clone() Set[T] { + result := make(Set[T], len(s)) + for key := range s { + result.Insert(key) + } + return result +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Set[T]) Difference(s2 Set[T]) Set[T] { + result := New[T]() + for key := range s1 { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] { + return s1.Difference(s2).Union(s2.Difference(s1)) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Set[T]) Union(s2 Set[T]) Set[T] { + result := s1.Clone() + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] { + var walk, other Set[T] + result := New[T]() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Set[T]) IsSuperset(s2 Set[T]) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Set[T]) Equal(s2 Set[T]) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfGeneric[T ordered] []T + +func (g sortableSliceOfGeneric[T]) Len() int { return len(g) } +func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) } +func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +// List returns the contents as a sorted T slice. +// +// This is a separate function and not a method because not all types supported +// by Generic are ordered and only those can be sorted. +func List[T ordered](s Set[T]) []T { + res := make(sortableSliceOfGeneric[T], 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return res +} + +// UnsortedList returns the slice with contents in random order. +func (s Set[T]) UnsortedList() []T { + res := make([]T, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// PopAny returns a single element from the set. +func (s Set[T]) PopAny() (T, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue T + return zeroValue, false +} + +// Len returns the size of the set. +func (s Set[T]) Len() int { + return len(s) +} + +func less[T ordered](lhs, rhs T) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index e6f37db88..1dab6d13c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +// String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[string]{} +// s2 := New[string]() type String map[string]Empty // NewString creates a String from a list of values. func NewString(items ...string) String { - ss := String{} - ss.Insert(items...) - return ss + return String(New[string](items...)) } // StringKeySet creates a String from a keys of a map[string](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func StringKeySet(theMap interface{}) String { - v := reflect.ValueOf(theMap) - ret := String{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(string)) - } - return ret +func StringKeySet[T any](theMap map[string]T) String { + return String(KeySet(theMap)) } // Insert adds items to the set. func (s String) Insert(items ...string) String { - for _, item := range items { - s[item] = Empty{} - } - return s + return String(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s String) Delete(items ...string) String { - for _, item := range items { - delete(s, item) - } - return s + return String(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. 
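
For orientation, the new generic set.go above carries all of the set logic, and the string.go wrappers below just delegate to it. A minimal usage sketch of that generic API, using only exported identifiers introduced in this patch (the values are made up for illustration):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// New infers the element type from its type argument.
	fruits := sets.New[string]("apple", "banana", "cherry")
	fruits.Insert("date")

	// KeySet builds a set from the keys of any map.
	ages := map[string]int{"alice": 30, "bob": 25}
	people := sets.KeySet(ages)

	fmt.Println(fruits.Has("apple"))               // true
	fmt.Println(people.Intersection(fruits).Len()) // 0, no common elements
	fmt.Println(sets.List(fruits))                 // sorted: [apple banana cherry date]
}
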
func (s String) Has(item string) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s String) HasAll(items ...string) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s String) HasAny(items ...string) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s String) Clone() String { + return String(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s String) Difference(s2 String) String { - result := NewString() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 String) Difference(s2 String) String { + return String(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 String) SymmetricDifference(s2 String) String { + return String(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s String) Difference(s2 String) String { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 String) Union(s2 String) String { - result := NewString() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return String(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 String) Union(s2 String) String { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 String) Intersection(s2 String) String { - var walk, other String - result := NewString() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return String(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 String) IsSuperset(s2 String) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 String) Equal(s2 String) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfString []string - -func (s sortableSliceOfString) Len() int { return len(s) } -func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } -func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted string slice. 
func (s String) List() []string { - res := make(sortableSliceOfString, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []string(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s String) UnsortedList() []string { - res := make([]string, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s String) PopAny() (string, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue string - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s String) Len() int { return len(s) } - -func lessString(lhs, rhs string) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index 4443bafd1..73244449f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - apelisse - pwittrock reviewers: - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index d49a56536..df305b712 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -105,7 +105,7 @@ func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatc // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 // If the underlying element is either an array or a slice, return its element type. - case reflect.Ptr: + case reflect.Pointer: t = t.Elem() if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { t = t.Elem() @@ -129,7 +129,7 @@ func getTagStructType(dataStruct interface{}) (reflect.Type, error) { t := reflect.TypeOf(dataStruct) // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 6fb369732..920c113bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. 
-// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive @@ -1182,7 +1182,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, schema Looku merged = originalFieldValue case !foundOriginal && foundPatch: // list was added - merged = patchFieldValue + v, keep := removeDirectives(patchFieldValue) + if !keep { + // Shouldn't be possible since patchFieldValue is a slice + continue + } + + merged = v.([]interface{}) case foundOriginal && foundPatch: merged, err = mergeSliceHandler(originalList, patchList, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) @@ -1270,6 +1276,42 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey return patch, serverOnly, nil } +// Removes directives from an object and returns value to use instead and whether +// or not the field/index should even be kept +// May modify input +func removeDirectives(obj interface{}) (interface{}, bool) { + if obj == nil { + return obj, true + } else if typedV, ok := obj.(map[string]interface{}); ok { + if _, hasDirective := typedV[directiveMarker]; hasDirective { + return nil, false + } + + for k, v := range typedV { + var keep bool + typedV[k], keep = removeDirectives(v) + if !keep { + delete(typedV, k) + } + } + return typedV, true + } else if typedV, ok := obj.([]interface{}); ok { + var res []interface{} + if typedV != nil { + // Make sure res is non-nil if patch is non-nil + res = []interface{}{} + } + for _, v := range typedV { + if newV, keep := removeDirectives(v); keep { + res = append(res, newV) + } + } + return res, true + } else { + return obj, true + } +} + // Merge fields from a patch map into the original map. Note: This may modify // both the original map and the patch because getting a deep copy of a map in // golang is highly non-trivial. 
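
The removeDirectives helper added above walks a decoded patch value and drops anything carrying a directive marker before it is copied into the merge result: a map that is itself a directive is discarded, and nested maps and slices are cleaned recursively. A simplified, self-contained sketch of that idea (this is not the vendored function; the marker key is assumed to be the package's usual "$patch"):

package main

import "fmt"

const directiveMarker = "$patch" // directive marker key used by strategic merge patch

// stripDirectives is an illustrative, simplified analogue of removeDirectives:
// it returns the cleaned value and whether the caller should keep it at all.
func stripDirectives(obj interface{}) (interface{}, bool) {
	switch v := obj.(type) {
	case map[string]interface{}:
		if _, has := v[directiveMarker]; has {
			return nil, false // the map is itself a directive, drop it entirely
		}
		for k, val := range v {
			if cleaned, keep := stripDirectives(val); keep {
				v[k] = cleaned
			} else {
				delete(v, k)
			}
		}
		return v, true
	case []interface{}:
		res := make([]interface{}, 0, len(v))
		for _, item := range v {
			if cleaned, keep := stripDirectives(item); keep {
				res = append(res, cleaned)
			}
		}
		return res, true
	default:
		return obj, true
	}
}

func main() {
	patch := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": 3,
			"template": map[string]interface{}{directiveMarker: "delete"},
		},
	}
	cleaned, _ := stripDirectives(patch)
	fmt.Println(cleaned) // map[spec:map[replicas:3]]
}
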
@@ -1333,7 +1375,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1345,7 +1390,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1372,7 +1420,11 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me } original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + // if patchV itself is a directive, then don't keep it + delete(original, k) + } } if err != nil { return nil, err @@ -1425,7 +1477,8 @@ func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, return nil, err } - if fieldPatchStrategy == mergeDirective { + // Delete lists are handled the same way regardless of what the field's patch strategy is + if fieldPatchStrategy == mergeDirective || isDeleteList { return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index b7abf39b5..ae73bda96 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -42,9 +42,9 @@ func (v *Error) Error() string { return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) } -type omitValueType struct{} +type OmitValueType struct{} -var omitValue = omitValueType{} +var omitValue = OmitValueType{} // ErrorBody returns the error message without the field name. This is useful // for building nice-looking higher-level error reporting. @@ -66,7 +66,7 @@ func (v *Error) ErrorBody() string { valueType := reflect.TypeOf(value) if value == nil || valueType == nil { value = "null" - } else if valueType.Kind() == reflect.Ptr { + } else if valueType.Kind() == reflect.Pointer { if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { value = "null" } else { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 83df4fb8d..0b8a6cb35 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. 
+ errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } @@ -334,7 +340,7 @@ func IsValidPortName(port string) []string { errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") } if !portNameOneLetterRegexp.MatchString(port) { - errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + errs = append(errs, "must contain at least one letter (a-z)") } if strings.Contains(port, "--") { errs = append(errs, "must not contain consecutive hyphens") diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go new file mode 100644 index 000000000..418761925 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step returns an amount of time to sleep determined by the original +// Duration and Jitter. The backoff is mutated to update its Steps and +// Duration. A nil Backoff always has a zero-duration step. +func (b *Backoff) Step() time.Duration { + if b == nil { + return 0 + } + var nextDuration time.Duration + nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter) + return nextDuration +} + +// DelayFunc returns a function that will compute the next interval to +// wait given the arguments in b. It does not mutate the original backoff +// but the function is safe to use only from a single goroutine. 
+func (b Backoff) DelayFunc() DelayFunc { + steps := b.Steps + duration := b.Duration + cap := b.Cap + factor := b.Factor + jitter := b.Jitter + + return func() time.Duration { + var nextDuration time.Duration + // jitter is applied per step and is not cumulative over multiple steps + nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter) + return nextDuration + } +} + +// Timer returns a timer implementation appropriate to this backoff's parameters +// for use with wait functions. +func (b Backoff) Timer() Timer { + if b.Steps > 1 || b.Jitter != 0 { + return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()} + } + if b.Duration > 0 { + return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration} + } + return newNoopTimer() +} + +// delay implements the core delay algorithm used in this package. +func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) { + // when steps is non-positive, do not alter the base duration + if steps < 1 { + if jitter > 0 { + return Jitter(duration, jitter), duration, 0 + } + return duration, duration, 0 + } + steps-- + + // calculate the next step's interval + if factor != 0 { + next = time.Duration(float64(duration) * factor) + if cap > 0 && next > cap { + next = cap + steps = 0 + } + } else { + next = duration + } + + // add jitter for this step + if jitter > 0 { + duration = Jitter(duration, jitter) + } + + return duration, next, steps + +} + +// DelayWithReset returns a DelayFunc that will return the appropriate next interval to +// wait. Every resetInterval the backoff parameters are reset to their initial state. +// This method is safe to invoke from multiple goroutines, but all calls will advance +// the backoff state when Factor is set. If Factor is zero, this method is the same as +// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is +// zero no backoff will be performed as the same calling DelayFunc with a zero factor +// and steps. +func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc { + if b.Factor <= 0 { + return b.DelayFunc() + } + if resetInterval <= 0 { + b.Steps = 0 + b.Factor = 0 + return b.DelayFunc() + } + return (&backoffManager{ + backoff: b, + initialBackoff: b, + resetInterval: resetInterval, + + clock: c, + lastStart: c.Now(), + timer: nil, + }).Step +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// UntilWithContext loops until context is done, running f every period. +// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). +func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. +// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). 
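
The Backoff fields and the delay helper above determine every interval produced in this file. A short sketch of how Step consumes that state, using only the exported Backoff API shown above (the concrete durations are example values):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	b := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial delay
		Factor:   2.0,                    // double the delay each step
		Steps:    4,                      // number of times the delay may change
		Cap:      500 * time.Millisecond, // never grow past this
	}
	for i := 0; i < 5; i++ {
		// Step returns the current delay and advances the backoff state;
		// once Steps is exhausted or Cap is reached the delay stops growing.
		fmt.Println(b.Step())
	}
}
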
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop to if you don't want it stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) +} + +// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + var t clock.Timer + for { + select { + case <-stopCh: + return + default: + } + + if !sliding { + t = backoff.Backoff() + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = backoff.Backoff() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + if !t.Stop() { + <-t.C() + } + return + case <-t.C(): + } + } +} + +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + +// backoffManager provides simple backoff behavior in a threadsafe manner to a caller. +type backoffManager struct { + backoff Backoff + initialBackoff Backoff + resetInterval time.Duration + + clock clock.Clock + + lock sync.Mutex + lastStart time.Time + timer clock.Timer +} + +// Step returns the expected next duration to wait. 
+func (b *backoffManager) Step() time.Duration { + b.lock.Lock() + defer b.lock.Unlock() + + switch { + case b.resetInterval == 0: + b.backoff = b.initialBackoff + case b.clock.Now().Sub(b.lastStart) > b.resetInterval: + b.backoff = b.initialBackoff + b.lastStart = b.clock.Now() + } + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer +// for exponential backoff. The returned timer must be drained before calling Backoff() the second +// time. +func (b *backoffManager) Backoff() clock.Timer { + b.lock.Lock() + defer b.lock.Unlock() + if b.timer == nil { + b.timer = b.clock.NewTimer(b.Step()) + } else { + b.timer.Reset(b.Step()) + } + return b.timer +} + +// Timer returns a new Timer instance that shares the clock and the reset behavior with all other +// timers. +func (b *backoffManager) Timer() Timer { + return DelayFunc(b.Step).Timer(b.clock) +} + +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. +type BackoffManager interface { + // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not + // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff + // until Timer.C() drains. If the second Backoff() is called before the timer from the first + // Backoff() call finishes, the first timer will NOT be drained and result in undetermined + // behavior. + Backoff() clock.Timer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then +// invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// delayFn := wait.Backoff{ +// Duration: init, +// Cap: max, +// Steps: int(math.Ceil(float64(max) / float64(init))), // now a required argument +// Factor: factor, +// Jitter: jitter, +// }.DelayWithReset(reset, clock) +// wait.BackoffUntil(..., delayFn.Timer(), ...) 
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { + return &exponentialBackoffManagerImpl{ + backoff: &Backoff{ + Duration: initBackoff, + Factor: backoffFactor, + Jitter: jitter, + + // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not + // what we ideally need here, we set it to max int and assume we will never use up the steps + Steps: math.MaxInt32, + Cap: maxBackoff, + }, + backoffTimer: nil, + initialBackoff: initBackoff, + lastBackoffStart: c.Now(), + backoffResetDuration: resetDuration, + clock: c, + } +} + +func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { + if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { + b.backoff.Steps = math.MaxInt32 + b.backoff.Duration = b.initialBackoff + } + b.lastBackoffStart = b.clock.Now() + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. +// The returned timer must be drained before calling Backoff() the second time +func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } + return b.backoffTimer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type jitteredBackoffManagerImpl struct { + clock clock.Clock + duration time.Duration + jitter float64 + backoffTimer clock.Timer +} + +// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter +// is negative, backoff will not be jittered. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct and invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewJitteredBackoffManager(duration, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...) +func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { + return &jitteredBackoffManagerImpl{ + clock: c, + duration: duration, + jitter: jitter, + backoffTimer: nil, + } +} + +func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { + jitteredPeriod := j.duration + if j.jitter > 0.0 { + jitteredPeriod = Jitter(j.duration, j.jitter) + } + return jitteredPeriod +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } + return j.backoffTimer +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. 
+// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +// +// Since backoffs are often subject to cancellation, we recommend using +// ExponentialBackoffWithContext and passing a context to the method. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff. +// It immediately returns an error if the condition returns an error, the context is cancelled +// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout). +// If an error is returned by the condition the backoff stops immediately. The condition will +// never be invoked more than backoff.Steps times. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go new file mode 100644 index 000000000..1d3dcaa74 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// DelayFunc returns the next time interval to wait. +type DelayFunc func() time.Duration + +// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes. +// Use Backoff{...}.Timer() for simple delays and more efficient timers. +func (fn DelayFunc) Timer(c clock.Clock) Timer { + return &variableTimer{fn: fn, new: c.NewTimer} +} + +// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This +// offers all of the functionality of the methods in this package. +func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition) +} + +// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that +// wish to share a single delay timer. 
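
ExponentialBackoffWithContext above is the bounded, context-aware retry primitive in this file. A minimal usage sketch (the retried operation is invented for illustration):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	attempts := 0
	err := wait.ExponentialBackoffWithContext(ctx, wait.Backoff{
		Duration: 50 * time.Millisecond, // first delay
		Factor:   2.0,                   // then roughly 100ms, 200ms, ...
		Steps:    5,                     // the condition runs at most 5 times
	}, func(ctx context.Context) (bool, error) {
		attempts++
		// Pretend the operation only succeeds on the third try.
		return attempts >= 3, nil
	})
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
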
+func (fn DelayFunc) Concurrent() DelayFunc { + var lock sync.Mutex + return func() time.Duration { + lock.Lock() + defer lock.Unlock() + return fn() + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go new file mode 100644 index 000000000..dd75801d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" +) + +// ErrWaitTimeout is returned when the condition was not satisfied in time. +// +// Deprecated: This type will be made private in favor of Interrupted() +// for checking errors or ErrorInterrupted(err) for returning a wrapped error. +var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition")) + +// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or +// Until loop exited for any reason besides the condition returning true or an +// error. A loop is considered interrupted if the calling context is cancelled, +// the context reaches its deadline, or a backoff reaches its maximum allowed +// steps. +// +// Callers should use this method instead of comparing the error value directly to +// ErrWaitTimeout, as methods that cancel a context may not return that error. +// +// Instead of: +// +// err := wait.Poll(...) +// if err == wait.ErrWaitTimeout { +// log.Infof("Wait for operation exceeded") +// } else ... +// +// Use: +// +// err := wait.Poll(...) +// if wait.Interrupted(err) { +// log.Infof("Wait for operation exceeded") +// } else ... +func Interrupted(err error) bool { + switch { + case errors.Is(err, errWaitTimeout), + errors.Is(err, context.Canceled), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } +} + +// errInterrupted +type errInterrupted struct { + cause error +} + +// ErrorInterrupted returns an error that indicates the wait was ended +// early for a given reason. If no cause is provided a generic error +// will be used but callers are encouraged to provide a real cause for +// clarity in debugging. +func ErrorInterrupted(cause error) error { + switch cause.(type) { + case errInterrupted: + // no need to wrap twice since errInterrupted is only needed + // once in a chain + return cause + default: + return errInterrupted{cause} + } +} + +// errWaitTimeout is the private version of the previous ErrWaitTimeout +// and is private to prevent direct comparison. Use ErrorInterrupted(err) +// to get an error that will return true for Interrupted(err). 
+var errWaitTimeout = errInterrupted{} + +func (e errInterrupted) Unwrap() error { return e.cause } +func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout } +func (e errInterrupted) Error() string { + if e.cause == nil { + // returns the same error message as historical behavior + return "timed out waiting for the condition" + } + return e.cause.Error() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go new file mode 100644 index 000000000..0dd13c626 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// loopConditionUntilContext executes the provided condition at intervals defined by +// the provided timer until the provided context is cancelled, the condition returns +// true, or the condition returns an error. If sliding is true, the period is computed +// after condition runs. If it is false then period includes the runtime for condition. +// If immediate is false the first delay happens before any call to condition, if +// immediate is true the condition will be invoked before waiting and guarantees that +// the condition is invoked at least once, regardless of whether the context has been +// cancelled. The returned error is the error returned by the last condition or the +// context error if the context was terminated. +// +// This is the common loop construct for all polling in the wait package. +func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error { + defer t.Stop() + + var timeCh <-chan time.Time + doneCh := ctx.Done() + + // if immediate is true the condition is + // guaranteed to be executed at least once, + // if we haven't requested immediate execution, delay once + if immediate { + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + } else { + timeCh = t.C() + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } + + for { + // checking ctx.Err() is slightly faster than checking a select + if err := ctx.Err(); err != nil { + return err + } + + if !sliding { + t.Next() + } + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + if sliding { + t.Next() + } + + if timeCh == nil { + timeCh = t.C() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and doneCh, and t.C select falls through. + // In order to mitigate we re-check doneCh at the beginning + // of every loop to guarantee at-most one extra execution + // of condition. 
+ select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go new file mode 100644 index 000000000..231d4c384 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -0,0 +1,315 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" +) + +// PollUntilContextCancel tries a condition func until it returns true, an error, or the context +// is cancelled or hits a deadline. condition will be invoked after the first interval if the +// context is not cancelled first. The returned error will be from ctx.Err(), the condition's +// err return value, or nil. If invoking condition takes longer than interval the next condition +// will be invoked immediately. When using very short intervals, condition may be invoked multiple +// times before a context cancellation is detected. If immediate is true, condition will be +// invoked before waiting and guarantees that condition is invoked at least once, regardless of +// whether the context has been cancelled. +func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// PollUntilContextTimeout will terminate polling after timeout duration by setting a context +// timeout. This is provided as a convenience function for callers not currently executing under +// a deadline and is equivalent to: +// +// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) +// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition) +// +// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying +// inline usage. All other behavior is identical to PollUntilContextCancel. +func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { + deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) + defer deadlineCancel() + return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. 
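
The replacements that the deprecation notices below point to are PollUntilContextCancel and PollUntilContextTimeout, with Interrupted used for error checking instead of comparing against ErrWaitTimeout. A sketch of that migration path (the polled URL is hypothetical):

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Poll every 200ms for at most 5s; immediate=true runs the condition
	// once before the first wait.
	err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Hypothetical readiness endpoint; returning an error would stop polling at once.
			resp, err := http.Get("http://localhost:8080/healthz")
			if err != nil {
				return false, nil // not reachable yet, keep polling
			}
			defer resp.Body.Close()
			return resp.StatusCode == http.StatusOK, nil
		})
	switch {
	case err == nil:
		fmt.Println("endpoint is ready")
	case wait.Interrupted(err):
		fmt.Println("timed out waiting for the endpoint")
	default:
		fmt.Println("polling failed:", err)
	}
}
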
+func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. +// +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollInfiniteWithContext tries a condition func until it returns true or an error +// +// PollInfiniteWithContext always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. 
+// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) +} + +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. 
Will be removed in a future release. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. +// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. +// condition: user specified ConditionWithContextFunc function. +// +// Deprecated: will be removed in favor of loopConditionUntilContext. +func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } + } + + select { + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead + return ErrWaitTimeout + default: + return waitForWithContext(ctx, wait, condition) + } +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. 
+// +// Deprecated: Will be removed in a future release. +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-ctx.Done(): + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go new file mode 100644 index 000000000..3efba3213 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "time" + + "k8s.io/utils/clock" +) + +// Timer abstracts how wait functions interact with time runtime efficiently. Test +// code may implement this interface directly but package consumers are encouraged +// to use the Backoff type as the primary mechanism for acquiring a Timer. The +// interface is a simplification of clock.Timer to prevent misuse. Timers are not +// expected to be safe for calls from multiple goroutines. +type Timer interface { + // C returns a channel that will receive a struct{} each time the timer fires. + // The channel should not be waited on after Stop() is invoked. It is allowed + // to cache the returned value of C() for the lifetime of the Timer. + C() <-chan time.Time + // Next is invoked by wait functions to signal timers that the next interval + // should begin. You may only use Next() if you have drained the channel C(). + // You should not call Next() after Stop() is invoked. + Next() + // Stop releases the timer. It is safe to invoke if no other methods have been + // called. + Stop() +} + +type noopTimer struct { + closedCh <-chan time.Time +} + +// newNoopTimer creates a timer with a unique channel to avoid contention +// for the channel's lock across multiple unrelated timers. 
+func newNoopTimer() noopTimer { + ch := make(chan time.Time) + close(ch) + return noopTimer{closedCh: ch} +} + +func (t noopTimer) C() <-chan time.Time { + return t.closedCh +} +func (noopTimer) Next() {} +func (noopTimer) Stop() {} + +type variableTimer struct { + fn DelayFunc + t clock.Timer + new func(time.Duration) clock.Timer +} + +func (t *variableTimer) C() <-chan time.Time { + if t.t == nil { + d := t.fn() + t.t = t.new(d) + } + return t.t.C() +} +func (t *variableTimer) Next() { + if t.t == nil { + return + } + d := t.fn() + t.t.Reset(d) +} +func (t *variableTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +type fixedTimer struct { + interval time.Duration + t clock.Ticker + new func(time.Duration) clock.Ticker +} + +func (t *fixedTimer) C() <-chan time.Time { + if t.t == nil { + t.t = t.new(t.interval) + } + return t.t.C() +} +func (t *fixedTimer) Next() { + // no-op for fixed timers +} +func (t *fixedTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +var ( + // RealTimer can be passed to methods that need a clock.Timer. + RealTimer = clock.RealClock{}.NewTimer +) + +var ( + // internalClock is used for test injection of clocks + internalClock = clock.RealClock{} +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index ec5be90b8..6805e8cf9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -18,20 +18,19 @@ package wait import ( "context" - "errors" - "math" "math/rand" "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/clock" ) // For any test of the style: -// ... -// <- time.After(timeout): -// t.Errorf("Timed out") +// +// ... +// <- time.After(timeout): +// t.Errorf("Timed out") +// // The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s // is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine // (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. @@ -81,113 +80,6 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). 
-func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - if !t.Stop() { - <-t.C() - } - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -201,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { return wait } -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - // ConditionFunc returns true if the condition is satisfied, or an error // if the loop should be aborted. 
type ConditionFunc func() (done bool, err error) @@ -221,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) -} - -// runConditionWithCrashProtectionWithContext runs a -// ConditionWithContextFunc with crash protection. -func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() - return condition(ctx) +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} } -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} +var _ context.Context = channelContext{} -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// contextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. 
It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. -type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. -func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. -// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. 
-// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} - -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollWithContext tries a condition func until it returns true, an error, -// or when the context expires or the timeout is reached, whichever -// happens first. -// -// PollWithContext always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, timeout), condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is +// channelContext will behave as if the context were cancelled when stopCh is // closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollUntilWithContext always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. 
-func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) -} - -// PollInfiniteWithContext tries a condition func until it returns true or an error -// -// PollInfiniteWithContext always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollImmediateWithContext tries a condition func until it returns true, an error, -// or the timeout is reached or the specified context expires, whichever happens first. -// -// PollImmediateWithContext always checks 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, timeout), condition) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) +type channelContext struct { + stopCh <-chan struct{} } -// PollImmediateUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. 
-func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } } +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } -// PollImmediateInfinite tries a condition func until it returns true or an error -// -// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// runConditionWithCrashProtection runs a ConditionFunc with crash protection. // -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + defer runtime.HandleCrash() + return condition() } -// PollImmediateInfiniteWithContext tries a condition func until it returns true -// or an error or the specified context gets cancelled or expired. -// -// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc +// with crash protection. // -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) -} - -// Internally used, each of the the public 'Poll*' function defined in this -// package should invoke this internal function with appropriate parameters. -// ctx: the context specified by the caller, for infinite polling pass -// a context that never gets cancelled or expired. -// immediate: if true, the 'condition' will be invoked before waiting for the interval, -// in this case 'condition' will always be invoked at least once. -// wait: user specified WaitFunc function that controls at what interval the condition -// function should be invoked periodically and whether it is bound by a timeout. -// condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { - if immediate { - done, err := runConditionWithCrashProtectionWithContext(ctx, condition) - if err != nil { - return err - } - if done { - return nil - } - } - - select { - case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility - return ErrWaitTimeout - default: - return WaitForWithContext(ctx, wait, condition) - } +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
-type WaitFunc func(done <-chan struct{}) <-chan struct{} +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. -// -// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. // -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := contextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()'', and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -649,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. 
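// The deprecations above all point callers at the context-first API. A minimal,
// caller-side migration sketch (illustrative only, not part of the vendored file;
// it assumes the PollUntilContextCancel entry point named in the Deprecated notes,
// with a context/interval/immediate/condition parameter order, and a hypothetical
// checkReady condition):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//
//	// Before: err := wait.PollImmediate(time.Second, 30*time.Second, cond)
//	// After: the timeout comes from the context, and failures surface as errors
//	// defined by the context package (e.g. context.DeadlineExceeded) rather than
//	// wait.ErrWaitTimeout.
//	err := wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) {
//		return checkReady(ctx) // hypothetical readiness check
//	})
//	_ = err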
+func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -668,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit return ErrWaitTimeout } case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead return ErrWaitTimeout } } } - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-ctx.Done(): - return - } - } - }() - - return ch - }) -} - -// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never -// exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - - if backoff.Steps == 1 { - break - } - - waitBeforeRetry := backoff.Step() - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(waitBeforeRetry): - } - } - - return ErrWaitTimeout -} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go index 22c9449f5..a5735a0b4 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/filter.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -32,7 +32,6 @@ type FilterFunc func(in Event) (out Event, keep bool) // WARNING: filter has a fatal flaw, in that it can't properly update the // Type field (Add/Modified/Deleted) to reflect items beginning to pass the // filter when they previously didn't. -// func Filter(w Interface, f FilterFunc) Interface { fw := &filteredWatch{ incoming: w, diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index e01d51906..d51f9567e 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -17,6 +17,7 @@ limitations under the License. 
package watch import ( + "fmt" "sync" "k8s.io/apimachinery/pkg/runtime" @@ -44,8 +45,11 @@ type Broadcaster struct { nextWatcher int64 distributing sync.WaitGroup - incoming chan Event - stopped chan struct{} + // incomingBlock allows us to ensure we don't race and end up sending events + // to a closed channel following a broadcaster shutdown. + incomingBlock sync.Mutex + incoming chan Event + stopped chan struct{} // How large to make watcher's channel. watchQueueLength int @@ -111,6 +115,8 @@ func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { // won't ever see that event, and will always see any event after they are // added. func (m *Broadcaster) blockQueue(f func()) { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() select { case <-m.stopped: return @@ -132,7 +138,7 @@ func (m *Broadcaster) blockQueue(f func()) { // Note: new watchers will only receive new events. They won't get an entire history // of previous events. It will block until the watcher is actually added to the // broadcaster. -func (m *Broadcaster) Watch() Interface { +func (m *Broadcaster) Watch() (Interface, error) { var w *broadcasterWatcher m.blockQueue(func() { id := m.nextWatcher @@ -146,11 +152,9 @@ func (m *Broadcaster) Watch() Interface { m.watchers[id] = w }) if w == nil { - // The panic here is to be consistent with the previous interface behavior - // we are willing to re-evaluate in the future. - panic("broadcaster already stopped") + return nil, fmt.Errorf("broadcaster already stopped") } - return w + return w, nil } // WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends @@ -158,7 +162,7 @@ func (m *Broadcaster) Watch() Interface { // The returned watch will have a queue length that is at least large enough to accommodate // all of the items in queuedEvents. It will block until the watcher is actually added to // the broadcaster. -func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { +func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) (Interface, error) { var w *broadcasterWatcher m.blockQueue(func() { id := m.nextWatcher @@ -179,11 +183,9 @@ func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { } }) if w == nil { - // The panic here is to be consistent with the previous interface behavior - // we are willing to re-evaluate in the future. - panic("broadcaster already stopped") + return nil, fmt.Errorf("broadcaster already stopped") } - return w + return w, nil } // stopWatching stops the given watcher and removes it from the list. @@ -210,19 +212,38 @@ func (m *Broadcaster) closeAll() { } // Action distributes the given event among all watchers. -func (m *Broadcaster) Action(action EventType, obj runtime.Object) { +func (m *Broadcaster) Action(action EventType, obj runtime.Object) error { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() + select { + case <-m.stopped: + return fmt.Errorf("broadcaster already stopped") + default: + } + m.incoming <- Event{action, obj} + return nil } // Action distributes the given event among all watchers, or drops it on the floor // if too many incoming actions are queued up. Returns true if the action was sent, // false if dropped. -func (m *Broadcaster) ActionOrDrop(action EventType, obj runtime.Object) bool { +func (m *Broadcaster) ActionOrDrop(action EventType, obj runtime.Object) (bool, error) { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() + + // Ensure that if the broadcaster is stopped we do not send events to it. 
+ select { + case <-m.stopped: + return false, fmt.Errorf("broadcaster already stopped") + default: + } + select { case m.incoming <- Event{action, obj}: - return true + return true, nil default: - return false + return false, nil } } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go index 8205a4dd1..5b8514b3f 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -28,7 +28,7 @@ const ( // TODO: fix the returned errors to be introspectable. func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } @@ -183,7 +183,7 @@ func typeFields(t reflect.Type) []field { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. ft = ft.Elem() } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go new file mode 100644 index 000000000..bd26f427e --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go @@ -0,0 +1,28 @@ +package netutil + +import ( + "net/url" + "strings" +) + +// FROM: http://golang.org/src/net/http/client.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// FROM: http://golang.org/src/net/http/transport.go +// canonicalAddr returns url.Host but always with a ":port" suffix +func CanonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go index 6be80349a..511e625b6 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -100,7 +100,8 @@ func makeUsefulPanic(v reflect.Value) { // Tests for deep equality using reflected types. The map argument tracks // comparisons that have already been seen, which allows short circuiting on // recursive types. -func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { +// equateNilAndEmpty controls whether empty maps/slices are equivalent to nil +func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, equateNilAndEmpty bool, depth int) bool { defer makeUsefulPanic(v1) if !v1.IsValid() || !v2.IsValid() { @@ -150,17 +151,36 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, // We don't need to check length here because length is part of // an array's type, which has already been filtered for. 
for i := 0; i < v1.Len(); i++ { - if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, equateNilAndEmpty, depth+1) { return false } } return true case reflect.Slice: - if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { - return false - } - if v1.IsNil() || v1.Len() == 0 { - return true + if equateNilAndEmpty { + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + + if v1.IsNil() || v1.Len() == 0 { + return true + } + } else { + if v1.IsNil() != v2.IsNil() { + return false + } + + // Optimize nil and empty cases + // Two lists that are BOTH nil are equal + // No need to check v2 is nil since v1.IsNil == v2.IsNil from above + if v1.IsNil() { + return true + } + + // Two lists that are both empty and both non nil are equal + if v1.Len() == 0 || v2.Len() == 0 { + return true + } } if v1.Len() != v2.Len() { return false @@ -169,7 +189,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, return true } for i := 0; i < v1.Len(); i++ { - if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, equateNilAndEmpty, depth+1) { return false } } @@ -178,22 +198,40 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, if v1.IsNil() || v2.IsNil() { return v1.IsNil() == v2.IsNil() } - return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, equateNilAndEmpty, depth+1) case reflect.Ptr: - return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, equateNilAndEmpty, depth+1) case reflect.Struct: for i, n := 0, v1.NumField(); i < n; i++ { - if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, equateNilAndEmpty, depth+1) { return false } } return true case reflect.Map: - if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { - return false - } - if v1.IsNil() || v1.Len() == 0 { - return true + if equateNilAndEmpty { + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + } else { + if v1.IsNil() != v2.IsNil() { + return false + } + + // Optimize nil and empty cases + // Two maps that are BOTH nil are equal + // No need to check v2 is nil since v1.IsNil == v2.IsNil from above + if v1.IsNil() { + return true + } + + // Two maps that are both empty and both non nil are equal + if v1.Len() == 0 || v2.Len() == 0 { + return true + } } if v1.Len() != v2.Len() { return false @@ -202,7 +240,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, return true } for _, k := range v1.MapKeys() { - if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, equateNilAndEmpty, depth+1) { return false } } @@ -232,6 +270,14 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, // Unexported field members cannot be compared and will cause an informative panic; you must add an Equality // function for these types. 
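// The equateNilAndEmpty flag threaded through deepValueEqual above is what separates
// the two exported entry points that follow. A small illustrative contrast (not part
// of the vendored file; e stands for any Equalities value from this package, its
// construction is elided):
//
//	var nilMap map[string]int
//	emptyMap := map[string]int{}
//
//	e.DeepEqual(nilMap, emptyMap)                          // true: nil and empty are equated
//	e.DeepEqualWithNilDifferentFromEmpty(nilMap, emptyMap) // false: nil is distinct from empty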
func (e Equalities) DeepEqual(a1, a2 interface{}) bool { + return e.deepEqual(a1, a2, true) +} + +func (e Equalities) DeepEqualWithNilDifferentFromEmpty(a1, a2 interface{}) bool { + return e.deepEqual(a1, a2, false) +} + +func (e Equalities) deepEqual(a1, a2 interface{}, equateNilAndEmpty bool) bool { if a1 == nil || a2 == nil { return a1 == a2 } @@ -240,7 +286,7 @@ func (e Equalities) DeepEqual(a1, a2 interface{}) bool { if v1.Type() != v2.Type() { return false } - return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) + return e.deepValueEqual(v1, v2, make(map[visit]bool), equateNilAndEmpty, 0) } func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { @@ -327,7 +373,7 @@ func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool return true } return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) - case reflect.Ptr: + case reflect.Pointer: if v1.IsNil() { return true } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go new file mode 100644 index 000000000..ea1dc377b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index eba37bafd..faff51a04 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -37,6 +37,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -139,3 +140,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index d0691de10..613856bac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -36,6 +36,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -130,3 +131,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
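// A hypothetical caller-side sketch of the new MatchConditions support in these
// generated builders (illustrative only, not part of the vendored file; the webhook
// name and CEL expression are made up, and the package alias below refers to
// k8s.io/client-go/applyconfigurations/admissionregistration/v1):
//
//	webhook := admissionregistrationv1.ValidatingWebhook().
//		WithName("example.acme.io").
//		WithMatchConditions(
//			admissionregistrationv1.MatchCondition().
//				WithName("exclude-kube-system").
//				WithExpression(`request.namespace != "kube-system"`),
//		)
//	_ = webhook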
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go new file mode 100644 index 000000000..023695139 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go new file mode 100644 index 000000000..f8b511f51 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go new file mode 100644 index 000000000..186c750f9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. 
+func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go new file mode 100644 index 000000000..a6710ac7e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// with apply. +type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// apply. +func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1alpha1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go new file mode 100644 index 000000000..bb2a7ba89 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" +) + +// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// with apply. +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + v1.RuleWithOperationsApplyConfiguration `json:",inline"` +} + +// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// apply. +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b +} + +// WithOperations adds the given value to the Operations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Operations field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Operations = append(b.Operations, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIVersions field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIVersions = append(b.APIVersions, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
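A hedged sketch of how MatchResources and NamedRuleWithOperations compose (editorial; it assumes the LabelSelector builder from k8s.io/client-go/applyconfigurations/meta/v1, and the label key, label value and resource choices are invented):

	package example

	import (
		admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
		admrv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
		metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
	)

	// prodDeploymentMatch matches CREATE and UPDATE of apps/v1 Deployments in
	// namespaces labelled environment=prod, using the variadic WithResourceRules
	// to append one NamedRuleWithOperations entry.
	func prodDeploymentMatch() *admrv1alpha1.MatchResourcesApplyConfiguration {
		return admrv1alpha1.MatchResources().
			WithNamespaceSelector(metav1apply.LabelSelector().
				WithMatchLabels(map[string]string{"environment": "prod"})).
			WithResourceRules(admrv1alpha1.NamedRuleWithOperations().
				WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
				WithAPIGroups("apps").
				WithAPIVersions("v1").
				WithResources("deployments"))
	}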
+func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { + b.Scope = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go new file mode 100644 index 000000000..350993cea --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go new file mode 100644 index 000000000..0951cae8a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go new file mode 100644 index 000000000..42a917071 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..c860b85cf --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. 
+type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
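The ValidatingAdmissionPolicy constructor above already sets the name, kind and apiVersion, so callers typically only chain the metadata they care about. A minimal sketch (editorial; the policy and label names are hypothetical):

	package example

	import (
		admrv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	)

	// newPolicy returns an apply configuration with Name, Kind, APIVersion and
	// one label populated; the metadata setters initialize ObjectMeta on demand.
	func newPolicy() *admrv1alpha1.ValidatingAdmissionPolicyApplyConfiguration {
		return admrv1alpha1.ValidatingAdmissionPolicy("replica-limit").
			WithLabels(map[string]string{"owner": "platform-team"})
	}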
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..dc0822640 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,247 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! 
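The Extract helpers above support the extract, modify-in-place, apply workflow described in their comments. A hedged sketch, assuming the matching v1alpha1 typed client with an Apply method is available in this client-go version (the field manager name and label are invented):

	package example

	import (
		"context"

		admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		admrv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
		"k8s.io/client-go/kubernetes"
	)

	// relabelPolicy extracts only the fields owned by fieldManager from a policy
	// read from the API server, adds a label, and applies the result back.
	func relabelPolicy(ctx context.Context, cs kubernetes.Interface, live *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) error {
		ac, err := admrv1alpha1.ExtractValidatingAdmissionPolicy(live, fieldManager)
		if err != nil {
			return err
		}
		ac.WithLabels(map[string]string{"audit": "enabled"})
		_, err = cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().
			Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
		return err
	}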
+func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..c9a4ff7ab --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. +type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1alpha1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go new file mode 100644 index 000000000..7ee320e42 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. +type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. 
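A short sketch of how these binding builders chain with the ParamRef builder from earlier in this patch (editorial; the binding, policy and namespace names are hypothetical, and the Deny validation action is assumed to be defined in k8s.io/api/admissionregistration/v1alpha1):

	package example

	import (
		admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
		admrv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	)

	// bindPolicy points a binding at the "replica-limit" policy, wires in a
	// parameter object by name and namespace, and enforces with Deny.
	func bindPolicy() *admrv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration {
		return admrv1alpha1.ValidatingAdmissionPolicyBinding("replica-limit-prod").
			WithSpec(admrv1alpha1.ValidatingAdmissionPolicyBindingSpec().
				WithPolicyName("replica-limit").
				WithParamRef(admrv1alpha1.ParamRef().
					WithName("replica-limit-params").
					WithNamespace("policy-system")).
				WithValidationActions(admissionregistrationv1alpha1.Deny))
	}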
+func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..821184c8a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go new file mode 100644 index 000000000..9a5fc8475 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// with apply. +type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go new file mode 100644 index 000000000..2c70a8cfb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// with apply. +type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Expression field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go new file mode 100644 index 000000000..e92fba0dd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go new file mode 100644 index 000000000..059c1b94b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go new file mode 100644 index 000000000..d099b6b6e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go new file mode 100644 index 000000000..25d4139db --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// with apply. +type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// apply. +func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1beta1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index ddb728aff..54845341f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -20,7 +20,8 @@ package v1beta1 import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use @@ -28,15 +29,16 @@ import ( type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` - Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` + Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1beta1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -64,7 +66,7 @@ func (b *MutatingWebhookApplyConfiguration) WithClientConfig(value *WebhookClien // WithRules adds the given value to the Rules field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Rules field. -func (b *MutatingWebhookApplyConfiguration) WithRules(values ...*RuleWithOperationsApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithRules(values ...*v1.RuleWithOperationsApplyConfiguration) *MutatingWebhookApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithRules") @@ -93,7 +95,7 @@ func (b *MutatingWebhookApplyConfiguration) WithMatchPolicy(value admissionregis // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { b.NamespaceSelector = value return b } @@ -101,7 +103,7 @@ func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.Labe // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. -func (b *MutatingWebhookApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { b.ObjectSelector = value return b } @@ -139,3 +141,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go similarity index 56% rename from vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go rename to vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go index e072edb85..fa346c4a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -19,26 +19,37 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use // with apply. 
-type RuleWithOperationsApplyConfiguration struct { - Operations []v1beta1.OperationType `json:"operations,omitempty"` - RuleApplyConfiguration `json:",inline"` +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with // apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b } // WithOperations adds the given value to the Operations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1beta1.OperationType) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.Operations = append(b.Operations, values[i]) } @@ -48,7 +59,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1beta1. // WithAPIGroups adds the given value to the APIGroups field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.APIGroups = append(b.APIGroups, values[i]) } @@ -58,7 +69,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) * // WithAPIVersions adds the given value to the APIVersions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIVersions field. 
-func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.APIVersions = append(b.APIVersions, values[i]) } @@ -68,7 +79,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) // WithResources adds the given value to the Resources field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.Resources = append(b.Resources, values[i]) } @@ -78,7 +89,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) * // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { b.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go new file mode 100644 index 000000000..6050e6025 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go new file mode 100644 index 000000000..2be98dbc5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. 
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b998..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go new file mode 100644 index 000000000..07baf334c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..e144bc9f7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. +type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..0dc06aede --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,247 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..d20a78eff --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. +type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1beta1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go new file mode 100644 index 000000000..c6e938910 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. 
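A minimal sketch (not part of the vendored files) chaining the binding and spec builders above into one apply configuration; the resource and policy names and the label are placeholders, and the Deny/Audit validation actions come from the bumped k8s.io/api admissionregistration/v1beta1 package.

package main

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	acv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

// buildBinding assembles a ValidatingAdmissionPolicyBinding apply configuration
// with chained "With" calls; only the fields set here will be owned on apply.
func buildBinding() *acv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration {
	return acv1beta1.ValidatingAdmissionPolicyBinding("demo-binding").
		WithLabels(map[string]string{"app": "demo"}).
		WithSpec(acv1beta1.ValidatingAdmissionPolicyBindingSpec().
			WithPolicyName("demo-policy").
			WithValidationActions(admissionregistrationv1beta1.Deny, admissionregistrationv1beta1.Audit))
}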
+type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. +func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1beta1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..e3e6d417e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. 
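A minimal sketch (not part of the vendored files) combining the policy-spec builders above; the CEL expressions are placeholders, the Validation and Variable builders appear later in this patch, and the MatchCondition builder is assumed to exist in the same generated package.

package main

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	acv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

// buildPolicySpec shows a ValidatingAdmissionPolicySpec with a composited variable,
// a match condition, and a validation that references the variable.
func buildPolicySpec() *acv1beta1.ValidatingAdmissionPolicySpecApplyConfiguration {
	return acv1beta1.ValidatingAdmissionPolicySpec().
		WithFailurePolicy(admissionregistrationv1beta1.Fail).
		WithVariables(acv1beta1.Variable().
			WithName("replicas").
			WithExpression("object.spec.replicas")).
		WithMatchConditions(acv1beta1.MatchCondition().
			WithName("exclude-kube-system").
			WithExpression("object.metadata.namespace != 'kube-system'")).
		WithValidations(acv1beta1.Validation().
			WithExpression("variables.replicas <= 5").
			WithMessage("no more than 5 replicas"))
}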
+func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 8ca0e9cbe..8c5c341ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -20,7 +20,8 @@ package v1beta1 import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use @@ -28,14 +29,15 @@ import ( type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` - Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` + Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration 
`json:"objectSelector,omitempty"` SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -63,7 +65,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithClientConfig(value *WebhookCli // WithRules adds the given value to the Rules field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Rules field. -func (b *ValidatingWebhookApplyConfiguration) WithRules(values ...*RuleWithOperationsApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithRules(values ...*v1.RuleWithOperationsApplyConfiguration) *ValidatingWebhookApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithRules") @@ -92,7 +94,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithMatchPolicy(value admissionreg // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { b.NamespaceSelector = value return b } @@ -100,7 +102,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.La // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. -func (b *ValidatingWebhookApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { b.ObjectSelector = value return b } @@ -130,3 +132,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go new file mode 100644 index 000000000..ed9ff1ac0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// with apply. +type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go new file mode 100644 index 000000000..0fc294c65 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// with apply. +type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go index d36f7603c..81c56330b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go @@ -24,6 +24,7 @@ type ServerStorageVersionApplyConfiguration struct { APIServerID *string `json:"apiServerID,omitempty"` EncodingVersion *string `json:"encodingVersion,omitempty"` DecodableVersions []string `json:"decodableVersions,omitempty"` + ServedVersions []string `json:"servedVersions,omitempty"` } // ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with @@ -57,3 +58,13 @@ func (b *ServerStorageVersionApplyConfiguration) WithDecodableVersions(values .. 
} return b } + +// WithServedVersions adds the given value to the ServedVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServedVersions field. +func (b *ServerStorageVersionApplyConfiguration) WithServedVersions(values ...string) *ServerStorageVersionApplyConfiguration { + for i := range values { + b.ServedVersions = append(b.ServedVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go new file mode 100644 index 000000000..9778f1c4a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// with apply. +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` +} + +// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// apply. +func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} +} + +// WithStart sets the Start field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Start field is set to the value of the last call. 
+func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index ee0ed40a5..81afdca59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -37,6 +37,7 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go rename to vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go index 30c3724cf..8f349a2d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go @@ -18,22 +18,22 @@ limitations under the License. package v1beta1 -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use +// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use // with apply. -type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` } -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with +// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with // apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} +func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} } -// WithDriver sets the Driver field in the declarative configuration to the given value +// WithStart sets the Start field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value +// If called multiple times, the Start field is set to the value of the last call. +func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 886433d9e..1eb1ba7b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -37,6 +37,7 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go new file mode 100644 index 000000000..c586da775 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// with apply. +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` +} + +// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// apply. 
+func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} +} + +// WithStart sets the Start field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Start field is set to the value of the last call. +func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index 08922cea5..b6165fbd9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -37,6 +37,7 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. 
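A minimal sketch (not part of the vendored files) using the new StatefulSetOrdinals builder from the apps/v1 apply configurations above; the replica count and start ordinal are arbitrary placeholders.

package main

import (
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
)

// buildStatefulSetSpec requests a non-zero start ordinal via the new Ordinals field.
func buildStatefulSetSpec() *appsv1ac.StatefulSetSpecApplyConfiguration {
	return appsv1ac.StatefulSetSpec().
		WithReplicas(3).
		WithOrdinals(appsv1ac.StatefulSetOrdinals().WithStart(10))
}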
+func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index e14244889..3d46a3ecf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -21,7 +21,7 @@ package v1 import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use @@ -30,13 +30,17 @@ type JobSpecApplyConfiguration struct { Parallelism *int32 `json:"parallelism,omitempty"` Completions *int32 `json:"completions,omitempty"` ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"` BackoffLimit *int32 `json:"backoffLimit,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"` + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` ManualSelector *bool `json:"manualSelector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"` CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"` Suspend *bool `json:"suspend,omitempty"` + PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"` } // JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with @@ -69,6 +73,14 @@ func (b *JobSpecApplyConfiguration) WithActiveDeadlineSeconds(value int64) *JobS return b } +// WithPodFailurePolicy sets the PodFailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodFailurePolicy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithPodFailurePolicy(value *PodFailurePolicyApplyConfiguration) *JobSpecApplyConfiguration { + b.PodFailurePolicy = value + return b +} + // WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the BackoffLimit field is set to the value of the last call. @@ -77,10 +89,26 @@ func (b *JobSpecApplyConfiguration) WithBackoffLimit(value int32) *JobSpecApplyC return b } +// WithBackoffLimitPerIndex sets the BackoffLimitPerIndex field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BackoffLimitPerIndex field is set to the value of the last call. 
+func (b *JobSpecApplyConfiguration) WithBackoffLimitPerIndex(value int32) *JobSpecApplyConfiguration { + b.BackoffLimitPerIndex = &value + return b +} + +// WithMaxFailedIndexes sets the MaxFailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxFailedIndexes field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithMaxFailedIndexes(value int32) *JobSpecApplyConfiguration { + b.MaxFailedIndexes = &value + return b +} + // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *JobSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration { +func (b *JobSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration { b.Selector = value return b } @@ -124,3 +152,11 @@ func (b *JobSpecApplyConfiguration) WithSuspend(value bool) *JobSpecApplyConfigu b.Suspend = &value return b } + +// WithPodReplacementPolicy sets the PodReplacementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodReplacementPolicy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithPodReplacementPolicy(value batchv1.PodReplacementPolicy) *JobSpecApplyConfiguration { + b.PodReplacementPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index a36d5d0ae..e8e472f8f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -31,7 +31,9 @@ type JobStatusApplyConfiguration struct { Active *int32 `json:"active,omitempty"` Succeeded *int32 `json:"succeeded,omitempty"` Failed *int32 `json:"failed,omitempty"` + Terminating *int32 `json:"terminating,omitempty"` CompletedIndexes *string `json:"completedIndexes,omitempty"` + FailedIndexes *string `json:"failedIndexes,omitempty"` UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"` Ready *int32 `json:"ready,omitempty"` } @@ -95,6 +97,14 @@ func (b *JobStatusApplyConfiguration) WithFailed(value int32) *JobStatusApplyCon return b } +// WithTerminating sets the Terminating field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Terminating field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithTerminating(value int32) *JobStatusApplyConfiguration { + b.Terminating = &value + return b +} + // WithCompletedIndexes sets the CompletedIndexes field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CompletedIndexes field is set to the value of the last call. 
@@ -103,6 +113,14 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta return b } +// WithFailedIndexes sets the FailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedIndexes field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithFailedIndexes(value string) *JobStatusApplyConfiguration { + b.FailedIndexes = &value + return b +} + // WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UncountedTerminatedPods field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go new file mode 100644 index 000000000..6da98386c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use +// with apply. +type PodFailurePolicyApplyConfiguration struct { + Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"` +} + +// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with +// apply. +func PodFailurePolicy() *PodFailurePolicyApplyConfiguration { + return &PodFailurePolicyApplyConfiguration{} +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *PodFailurePolicyApplyConfiguration) WithRules(values ...*PodFailurePolicyRuleApplyConfiguration) *PodFailurePolicyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go new file mode 100644 index 000000000..65f625181 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" +) + +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use +// with apply. +type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { + ContainerName *string `json:"containerName,omitempty"` + Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` + Values []int32 `json:"values,omitempty"` +} + +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with +// apply. +func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} +} + +// WithContainerName sets the ContainerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerName field is set to the value of the last call. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainerName(value string) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + b.ContainerName = &value + return b +} + +// WithOperator sets the Operator field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Operator field is set to the value of the last call. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + b.Operator = &value + return b +} + +// WithValues adds the given value to the Values field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Values field. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithValues(values ...int32) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + for i := range values { + b.Values = append(b.Values, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go new file mode 100644 index 000000000..da1556ff8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use +// with apply. +type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { + Type *v1.PodConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` +} + +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with +// apply. +func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go new file mode 100644 index 000000000..d43524353 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" +) + +// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use +// with apply. 
+type PodFailurePolicyRuleApplyConfiguration struct { + Action *v1.PodFailurePolicyAction `json:"action,omitempty"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"` + OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` +} + +// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with +// apply. +func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { + return &PodFailurePolicyRuleApplyConfiguration{} +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. +func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { + b.Action = &value + return b +} + +// WithOnExitCodes sets the OnExitCodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OnExitCodes field is set to the value of the last call. +func (b *PodFailurePolicyRuleApplyConfiguration) WithOnExitCodes(value *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration { + b.OnExitCodes = value + return b +} + +// WithOnPodConditions adds the given value to the OnPodConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OnPodConditions field. +func (b *PodFailurePolicyRuleApplyConfiguration) WithOnPodConditions(values ...*PodFailurePolicyOnPodConditionsPatternApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOnPodConditions") + } + b.OnPodConditions = append(b.OnPodConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..788d2a07d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,247 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use +// with apply. +type ClusterTrustBundleApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with +// apply. +func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { + b := &ClusterTrustBundleApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") + return b +} + +// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from +// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a +// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API. +// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "") +} + +// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status") +} + +func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) { + b := &ClusterTrustBundleApplyConfiguration{} + err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterTrustBundle.Name) + + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterTrustBundleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundleSpecApplyConfiguration) *ClusterTrustBundleApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go new file mode 100644 index 000000000..d1aea1d6d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// with apply. +type ClusterTrustBundleSpecApplyConfiguration struct { + SignerName *string `json:"signerName,omitempty"` + TrustBundle *string `json:"trustBundle,omitempty"` +} + +// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// apply. +func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { + return &ClusterTrustBundleSpecApplyConfiguration{} +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.SignerName = &value + return b +} + +// WithTrustBundle sets the TrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrustBundle field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithTrustBundle(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.TrustBundle = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go new file mode 100644 index 000000000..2153570fc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClaimSourceApplyConfiguration represents an declarative configuration of the ClaimSource type for use +// with apply. +type ClaimSourceApplyConfiguration struct { + ResourceClaimName *string `json:"resourceClaimName,omitempty"` + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` +} + +// ClaimSourceApplyConfiguration constructs an declarative configuration of the ClaimSource type for use with +// apply. +func ClaimSource() *ClaimSourceApplyConfiguration { + return &ClaimSourceApplyConfiguration{} +} + +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *ClaimSourceApplyConfiguration) WithResourceClaimName(value string) *ClaimSourceApplyConfiguration { + b.ResourceClaimName = &value + return b +} + +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. 
+func (b *ClaimSourceApplyConfiguration) WithResourceClaimTemplateName(value string) *ClaimSourceApplyConfiguration { + b.ResourceClaimTemplateName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index d3b066d9c..32d715606 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -25,28 +25,30 @@ import ( // ContainerApplyConfiguration represents an declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool 
`json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with @@ -146,6 +148,27 @@ func (b *ContainerApplyConfiguration) WithResources(value *ResourceRequirementsA return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *ContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *ContainerApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go new file mode 100644 index 000000000..bbbcbc9f1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// with apply. +type ContainerResizePolicyApplyConfiguration struct { + ResourceName *v1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` +} + +// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// apply. 
+func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { + return &ContainerResizePolicyApplyConfiguration{} +} + +// WithResourceName sets the ResourceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceName field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { + b.ResourceName = &value + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { + b.RestartPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 18d2925c1..2b98c4658 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -18,18 +18,24 @@ limitations under the License. package v1 +import ( + corev1 "k8s.io/api/core/v1" +) + // ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use // with apply. type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } // ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with @@ -109,3 +115,19 @@ func (b *ContainerStatusApplyConfiguration) WithStarted(value bool) *ContainerSt b.Started = &value return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *ContainerStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *ContainerStatusApplyConfiguration { + b.Resources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go index b8165445d..2fc681604 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go @@ -30,6 +30,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct { NodeStageSecretRef *SecretReferenceApplyConfiguration `json:"nodeStageSecretRef,omitempty"` NodePublishSecretRef *SecretReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` ControllerExpandSecretRef *SecretReferenceApplyConfiguration `json:"controllerExpandSecretRef,omitempty"` + NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"` } // CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with @@ -115,3 +116,11 @@ func (b *CSIPersistentVolumeSourceApplyConfiguration) WithControllerExpandSecret b.ControllerExpandSecretRef = value return b } + +// WithNodeExpandSecretRef sets the NodeExpandSecretRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeExpandSecretRef field is set to the value of the last call. +func (b *CSIPersistentVolumeSourceApplyConfiguration) WithNodeExpandSecretRef(value *SecretReferenceApplyConfiguration) *CSIPersistentVolumeSourceApplyConfiguration { + b.NodeExpandSecretRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 6c24cd419..5fa79a246 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -126,6 +126,27 @@ func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequ return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. 
+func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 67e658cfa..8cded29a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -25,28 +25,30 @@ import ( // EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + 
Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with @@ -146,6 +148,27 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResources(value *Resour return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerCommonApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *EphemeralContainerCommonApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerCommonApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go new file mode 100644 index 000000000..c2a42cf74 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use +// with apply. +type HostIPApplyConfiguration struct { + IP *string `json:"ip,omitempty"` +} + +// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with +// apply. +func HostIP() *HostIPApplyConfiguration { + return &HostIPApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *HostIPApplyConfiguration) WithIP(value string) *HostIPApplyConfiguration { + b.IP = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index e22d04b0d..f324584ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -33,7 +33,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct { StorageClassName *string `json:"storageClassName,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` - DataSourceRef *TypedLocalObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` } // PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with @@ -103,7 +103,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSource(value *Type // WithDataSourceRef sets the DataSourceRef field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DataSourceRef field is set to the value of the last call. 
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedLocalObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.DataSourceRef = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 4c38d89f5..c29b2a9a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,12 +25,12 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - ResizeStatus *v1.PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -86,10 +86,16 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v return b } -// WithResizeStatus sets the ResizeStatus field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResizeStatus field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithResizeStatus(value v1.PersistentVolumeClaimResizeStatus) *PersistentVolumeClaimStatusApplyConfiguration { - b.ResizeStatus = &value +// WithAllocatedResourceStatuses puts the entries into the AllocatedResourceStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field, +// overwriting an existing map entries in AllocatedResourceStatuses field with the same key. 
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { + if b.AllocatedResourceStatuses == nil && len(entries) > 0 { + b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries)) + } + for k, v := range entries { + b.AllocatedResourceStatuses[k] = v + } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index f7048dec4..a473c0e92 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -20,14 +20,16 @@ package v1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { - Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` + Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } // PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with @@ -59,3 +61,11 @@ func (b *PersistentVolumeStatusApplyConfiguration) WithReason(value string) *Per b.Reason = &value return b } + +// WithLastPhaseTransitionTime sets the LastPhaseTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastPhaseTransitionTime field is set to the value of the last call. +func (b *PersistentVolumeStatusApplyConfiguration) WithLastPhaseTransitionTime(value metav1.Time) *PersistentVolumeStatusApplyConfiguration { + b.LastPhaseTransitionTime = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go index 86601cc48..69b250d47 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go @@ -16,37 +16,33 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v2 +package v1 -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use +// PodResourceClaimApplyConfiguration represents an declarative configuration of the PodResourceClaim type for use // with apply. 
-type PodResourceMetricSourceApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` - Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +type PodResourceClaimApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Source *ClaimSourceApplyConfiguration `json:"source,omitempty"` } -// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with +// PodResourceClaimApplyConfiguration constructs an declarative configuration of the PodResourceClaim type for use with // apply. -func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { - return &PodResourceMetricSourceApplyConfiguration{} +func PodResourceClaim() *PodResourceClaimApplyConfiguration { + return &PodResourceClaimApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { +func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResourceClaimApplyConfiguration { b.Name = &value return b } -// WithTarget sets the Target field in the declarative configuration to the given value +// WithSource sets the Source field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Target field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { - b.Target = value +// If called multiple times, the Source field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithSource(value *ClaimSourceApplyConfiguration) *PodResourceClaimApplyConfiguration { + b.Source = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go new file mode 100644 index 000000000..ae79ca01b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use +// with apply. 
+type PodResourceClaimStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` +} + +// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with +// apply. +func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration { + return &PodResourceClaimStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodResourceClaimStatusApplyConfiguration) WithName(value string) *PodResourceClaimStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *PodResourceClaimStatusApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimStatusApplyConfiguration { + b.ResourceClaimName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go new file mode 100644 index 000000000..f7649c2e9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodSchedulingGateApplyConfiguration represents an declarative configuration of the PodSchedulingGate type for use +// with apply. +type PodSchedulingGateApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PodSchedulingGateApplyConfiguration constructs an declarative configuration of the PodSchedulingGate type for use with +// apply. +func PodSchedulingGate() *PodSchedulingGateApplyConfiguration { + return &PodSchedulingGateApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *PodSchedulingGateApplyConfiguration) WithName(value string) *PodSchedulingGateApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index 015859e9a..a9acd36fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -61,6 +61,9 @@ type PodSpecApplyConfiguration struct { TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"` SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"` OS *PodOSApplyConfiguration `json:"os,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` + SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"` + ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` } // PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with @@ -407,3 +410,37 @@ func (b *PodSpecApplyConfiguration) WithOS(value *PodOSApplyConfiguration) *PodS b.OS = value return b } + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. +func (b *PodSpecApplyConfiguration) WithHostUsers(value bool) *PodSpecApplyConfiguration { + b.HostUsers = &value + return b +} + +// WithSchedulingGates adds the given value to the SchedulingGates field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SchedulingGates field. +func (b *PodSpecApplyConfiguration) WithSchedulingGates(values ...*PodSchedulingGateApplyConfiguration) *PodSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSchedulingGates") + } + b.SchedulingGates = append(b.SchedulingGates, *values[i]) + } + return b +} + +// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaims field. +func (b *PodSpecApplyConfiguration) WithResourceClaims(values ...*PodResourceClaimApplyConfiguration) *PodSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaims") + } + b.ResourceClaims = append(b.ResourceClaims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 7ee5b9955..1a58ab6be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -26,19 +26,22 @@ import ( // PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use // with apply. 
type PodStatusApplyConfiguration struct { - Phase *v1.PodPhase `json:"phase,omitempty"` - Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` - NominatedNodeName *string `json:"nominatedNodeName,omitempty"` - HostIP *string `json:"hostIP,omitempty"` - PodIP *string `json:"podIP,omitempty"` - PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"` - StartTime *metav1.Time `json:"startTime,omitempty"` - InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` - ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` - QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` - EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Phase *v1.PodPhase `json:"phase,omitempty"` + Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + NominatedNodeName *string `json:"nominatedNodeName,omitempty"` + HostIP *string `json:"hostIP,omitempty"` + HostIPs []HostIPApplyConfiguration `json:"hostIPs,omitempty"` + PodIP *string `json:"podIP,omitempty"` + PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` + ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` + QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` + EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Resize *v1.PodResizeStatus `json:"resize,omitempty"` + ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` } // PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with @@ -100,6 +103,19 @@ func (b *PodStatusApplyConfiguration) WithHostIP(value string) *PodStatusApplyCo return b } +// WithHostIPs adds the given value to the HostIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostIPs field. +func (b *PodStatusApplyConfiguration) WithHostIPs(values ...*HostIPApplyConfiguration) *PodStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostIPs") + } + b.HostIPs = append(b.HostIPs, *values[i]) + } + return b +} + // WithPodIP sets the PodIP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodIP field is set to the value of the last call. @@ -175,3 +191,24 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* } return b } + +// WithResize sets the Resize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resize field is set to the value of the last call. 
+func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { + b.Resize = &value + return b +} + +// WithResourceClaimStatuses adds the given value to the ResourceClaimStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaimStatuses field. +func (b *PodStatusApplyConfiguration) WithResourceClaimStatuses(values ...*PodResourceClaimStatusApplyConfiguration) *PodStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaimStatuses") + } + b.ResourceClaimStatuses = append(b.ResourceClaimStatuses, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go similarity index 65% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go index 27b49bf15..064dd4e2e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1 -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use +// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use // with apply. -type AllowedCSIDriverApplyConfiguration struct { +type ResourceClaimApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with +// ResourceClaimApplyConfiguration constructs an declarative configuration of the ResourceClaim type for use with // apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} +func ResourceClaim() *ResourceClaimApplyConfiguration { + return &ResourceClaimApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index d22f38479..9482b8d71 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -25,8 +25,9 @@ import ( // ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use // with apply. 
type ResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *v1.ResourceList `json:"limits,omitempty"` + Requests *v1.ResourceList `json:"requests,omitempty"` + Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } // ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with @@ -50,3 +51,16 @@ func (b *ResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceL b.Requests = &value return b } + +// WithClaims adds the given value to the Claims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Claims field. +func (b *ResourceRequirementsApplyConfiguration) WithClaims(values ...*ResourceClaimApplyConfiguration) *ResourceRequirementsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithClaims") + } + b.Claims = append(b.Claims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 856cd4f9d..493af6fb3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -35,15 +35,15 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"` - IPFamilyPolicy *corev1.IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"` + IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` } // ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with @@ -152,7 +152,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. 
-func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -194,7 +194,7 @@ func (b *ServiceSpecApplyConfiguration) WithIPFamilies(values ...corev1.IPFamily // WithIPFamilyPolicy sets the IPFamilyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IPFamilyPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicy) *ServiceSpecApplyConfiguration { b.IPFamilyPolicy = &value return b } @@ -218,7 +218,7 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index 867cc89f2..fbfa8fa88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -26,11 +26,14 @@ import ( // TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use // with apply. 
type TopologySpreadConstraintApplyConfiguration struct { - MaxSkew *int32 `json:"maxSkew,omitempty"` - TopologyKey *string `json:"topologyKey,omitempty"` - WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` - LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - MinDomains *int32 `json:"minDomains,omitempty"` + MaxSkew *int32 `json:"maxSkew,omitempty"` + TopologyKey *string `json:"topologyKey,omitempty"` + WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + MinDomains *int32 `json:"minDomains,omitempty"` + NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` + NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } // TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with @@ -78,3 +81,29 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) b.MinDomains = &value return b } + +// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.NodeAffinityPolicy = &value + return b +} + +// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.NodeTaintsPolicy = &value + return b +} + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. +func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go new file mode 100644 index 000000000..d9a01c9c3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TypedObjectReferenceApplyConfiguration represents an declarative configuration of the TypedObjectReference type for use +// with apply. +type TypedObjectReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// TypedObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedObjectReference type for use with +// apply. +func TypedObjectReference() *TypedObjectReferenceApplyConfiguration { + return &TypedObjectReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithAPIGroup(value string) *TypedObjectReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithKind(value string) *TypedObjectReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithName(value string) *TypedObjectReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithNamespace(value string) *TypedObjectReferenceApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6af..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. 
-func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c7968813..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. 
-func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f7658..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go new file mode 100644 index 000000000..20bf63780 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. 
+type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..e16dd2363 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. +type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. 
+func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go new file mode 100644 index 000000000..083653797 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. 
+func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go index 941769594..faa7e2446 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go @@ -18,14 +18,10 @@ limitations under the License. package v1beta1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - // IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } // IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 81c84d2d4..27ea5d9dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -32,8 +32,7 @@ import ( type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } // NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with @@ -248,11 +247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. 
-func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc9..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. -type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. 
-func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85f..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675a..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. 
-func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. 
-func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a73..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. 
-func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec4313812..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..3535d7478 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. +func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. 
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go index df3fecbd7..10660e81a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go @@ -23,6 +23,8 @@ package v1alpha1 type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } // LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go index 3949dee46..ade920a75 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go @@ -27,6 +27,7 @@ import ( type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } // PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..071048090 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. 
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go index f7f77f9b2..6f57169e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go @@ -23,6 +23,8 @@ package v1beta1 type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } // LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index 8ed4e399f..19146d9f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -27,6 +27,7 @@ import ( type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } // PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..d6bc330fe --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. 
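The new Exempt, LendablePercent and BorrowingLimitPercent fields ride on the same chained-builder pattern as the existing apply-configuration methods. A minimal sketch of composing them, assuming the flowcontrol/v1beta1 packages vendored by this bump (the share and percent values are arbitrary; Exempt is only meaningful when the priority level Type is "Exempt"):

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
)

func main() {
	// Chain the generated builders: an exempt priority level that nominally
	// holds 5 concurrency shares and may lend up to half of them.
	spec := flowcontrolac.PriorityLevelConfigurationSpec().
		WithType(flowcontrolv1beta1.PriorityLevelEnablementExempt).
		WithExempt(flowcontrolac.ExemptPriorityLevelConfiguration().
			WithNominalConcurrencyShares(5).
			WithLendablePercent(50))

	// Print the assembled apply configuration (fields are pointers, so this
	// is only a quick structural check).
	fmt.Printf("%+v\n", *spec)
}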
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go index e25f7f6b0..563b185c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -23,6 +23,8 @@ package v1beta2 type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } // LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index 5560ed9e5..994a8a16a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -27,6 +27,7 @@ import ( type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } // PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..b03c11d0d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. 
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go new file mode 100644 index 000000000..cd4572593 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// with apply. +type FlowDistinguisherMethodApplyConfiguration struct { + Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` +} + +// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// apply. +func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { + return &FlowDistinguisherMethodApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go new file mode 100644 index 000000000..c95635360 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// with apply. +type FlowSchemaApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` +} + +// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// apply. +func FlowSchema(name string) *FlowSchemaApplyConfiguration { + b := &FlowSchemaApplyConfiguration{} + b.WithName(name) + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b +} + +// ExtractFlowSchema extracts the applied configuration owned by fieldManager from +// flowSchema. If no managedFields are found in flowSchema for fieldManager, a +// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. +// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") +} + +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") +} + +func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { + b := &FlowSchemaApplyConfiguration{} + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.FlowSchema"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(flowSchema.Name) + + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
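The Extract helpers exist for the extract / modify-in-place / apply workflow described in their doc comments. A rough sketch of that round trip, under the assumption of a typed clientset exposing the usual server-side-apply Apply method; the field-manager name and precedence value are illustrative:

package flowcontrolexample

import (
	"context"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

// bumpMatchingPrecedence re-applies only the fields owned by fieldManager,
// with the matching precedence updated, using extract / modify / apply.
func bumpMatchingPrecedence(ctx context.Context, cs kubernetes.Interface, current *flowcontrolv1beta3.FlowSchema, fieldManager string) (*flowcontrolv1beta3.FlowSchema, error) {
	// Recover the apply configuration currently owned by fieldManager.
	ac, err := flowcontrolac.ExtractFlowSchema(current, fieldManager)
	if err != nil {
		return nil, err
	}

	// Modify in place through the generated builders.
	if ac.Spec == nil {
		ac.WithSpec(flowcontrolac.FlowSchemaSpec())
	}
	ac.Spec.WithMatchingPrecedence(500)

	// Send the result back with server-side apply.
	return cs.FlowcontrolV1beta3().FlowSchemas().Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	})
}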
+func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithSpec(value *FlowSchemaSpecApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go new file mode 100644 index 000000000..0ef3a2c92 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// with apply. +type FlowSchemaConditionApplyConfiguration struct { + Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// apply. +func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { + return &FlowSchemaConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithReason(value string) *FlowSchemaConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithMessage(value string) *FlowSchemaConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go new file mode 100644 index 000000000..e077ed3fd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// with apply. +type FlowSchemaSpecApplyConfiguration struct { + PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` + MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"` + DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"` + Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` +} + +// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// apply. +func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { + return &FlowSchemaSpecApplyConfiguration{} +} + +// WithPriorityLevelConfiguration sets the PriorityLevelConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityLevelConfiguration field is set to the value of the last call. 
+func (b *FlowSchemaSpecApplyConfiguration) WithPriorityLevelConfiguration(value *PriorityLevelConfigurationReferenceApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.PriorityLevelConfiguration = value + return b +} + +// WithMatchingPrecedence sets the MatchingPrecedence field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchingPrecedence field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithMatchingPrecedence(value int32) *FlowSchemaSpecApplyConfiguration { + b.MatchingPrecedence = &value + return b +} + +// WithDistinguisherMethod sets the DistinguisherMethod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DistinguisherMethod field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithDistinguisherMethod(value *FlowDistinguisherMethodApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.DistinguisherMethod = value + return b +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *FlowSchemaSpecApplyConfiguration) WithRules(values ...*PolicyRulesWithSubjectsApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go similarity index 61% rename from vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go index 032de18ed..18db1c932 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go @@ -16,28 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1beta3 -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use +// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use // with apply. -type NetworkPolicyStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +type FlowSchemaStatusApplyConfiguration struct { + Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with // apply. 
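Taken together, the top-level constructor stamps the name, Kind and APIVersion, while the nested spec builders fill in the rest. A hypothetical end-to-end construction; the schema name, priority-level reference and label are made up, and PriorityLevelConfigurationReference is assumed to come from the same generated v1beta3 package:

package main

import (
	"encoding/json"
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
)

func main() {
	// FlowSchema(name) pre-populates Name, Kind and APIVersion; everything
	// else is chained onto it.
	fs := flowcontrolac.FlowSchema("example-flow-schema").
		WithLabels(map[string]string{"app.kubernetes.io/managed-by": "example"}).
		WithSpec(flowcontrolac.FlowSchemaSpec().
			WithPriorityLevelConfiguration(
				flowcontrolac.PriorityLevelConfigurationReference().WithName("global-default")).
			WithMatchingPrecedence(1000).
			WithDistinguisherMethod(
				flowcontrolac.FlowDistinguisherMethod().
					WithType(flowcontrolv1beta3.FlowDistinguisherMethodByUserType)))

	// The apply configuration marshals to the JSON body a server-side apply
	// request would send.
	out, err := json.MarshalIndent(fs, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}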
-func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration { - return &NetworkPolicyStatusApplyConfiguration{} +func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { + return &FlowSchemaStatusApplyConfiguration{} } // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration { +func (b *FlowSchemaStatusApplyConfiguration) WithConditions(values ...*FlowSchemaConditionApplyConfiguration) *FlowSchemaStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go new file mode 100644 index 000000000..b919b711b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// with apply. +type GroupSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// apply. +func GroupSubject() *GroupSubjectApplyConfiguration { + return &GroupSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GroupSubjectApplyConfiguration) WithName(value string) *GroupSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go new file mode 100644 index 000000000..269a48721 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// with apply. +type LimitedPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` +} + +// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// apply. +func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { + return &LimitedPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLimitResponse sets the LimitResponse field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LimitResponse field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse(value *LimitResponseApplyConfiguration) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LimitResponse = value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go new file mode 100644 index 000000000..b7a64ebfe --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// with apply. +type LimitResponseApplyConfiguration struct { + Type *v1beta3.LimitResponseType `json:"type,omitempty"` + Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` +} + +// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// apply. +func LimitResponse() *LimitResponseApplyConfiguration { + return &LimitResponseApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithType(value v1beta3.LimitResponseType) *LimitResponseApplyConfiguration { + b.Type = &value + return b +} + +// WithQueuing sets the Queuing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queuing field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithQueuing(value *QueuingConfigurationApplyConfiguration) *LimitResponseApplyConfiguration { + b.Queuing = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go new file mode 100644 index 000000000..ecb47f52c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// with apply. +type NonResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + NonResourceURLs []string `json:"nonResourceURLs,omitempty"` +} + +// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// apply. +func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { + return &NonResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithNonResourceURLs adds the given value to the NonResourceURLs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceURLs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithNonResourceURLs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.NonResourceURLs = append(b.NonResourceURLs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go new file mode 100644 index 000000000..e30aace19 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// with apply. +type PolicyRulesWithSubjectsApplyConfiguration struct { + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"` + NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` +} + +// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// apply. 
+func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { + return &PolicyRulesWithSubjectsApplyConfiguration{} +} + +// WithSubjects adds the given value to the Subjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subjects field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithSubjects(values ...*SubjectApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubjects") + } + b.Subjects = append(b.Subjects, *values[i]) + } + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithResourceRules(values ...*ResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithNonResourceRules adds the given value to the NonResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithNonResourceRules(values ...*NonResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNonResourceRules") + } + b.NonResourceRules = append(b.NonResourceRules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..6fbbbea8f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
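// Editorial sketch (not part of the vendored patch): a minimal illustration of how the
// chained "With" builders added by this diff -- FlowSchemaSpec, PolicyRulesWithSubjects
// above, and the Subject/ServiceAccountSubject/ResourcePolicyRule builders later in the
// same package -- are typically combined into a FlowSchema apply configuration for
// server-side apply. The clientset variable, object names, and field-manager string are
// assumptions for the example, not taken from this diff.
package example

import (
	"context"

	flowcontrolapi "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

func applyFlowSchema(ctx context.Context, cs kubernetes.Interface) error {
	// Build the desired state; unset fields stay omitted, so only the fields listed
	// here are claimed by the field manager.
	fs := flowcontrolac.FlowSchema("example-flow-schema").
		WithSpec(flowcontrolac.FlowSchemaSpec().
			WithPriorityLevelConfiguration(
				flowcontrolac.PriorityLevelConfigurationReference().WithName("example-priority-level")).
			WithMatchingPrecedence(1000).
			WithRules(flowcontrolac.PolicyRulesWithSubjects().
				WithSubjects(flowcontrolac.Subject().
					WithKind(flowcontrolapi.SubjectKindServiceAccount).
					WithServiceAccount(flowcontrolac.ServiceAccountSubject().
						WithNamespace("default").WithName("example-sa"))).
				WithResourceRules(flowcontrolac.ResourcePolicyRule().
					WithVerbs("get", "list").
					WithAPIGroups("").
					WithResources("configmaps").
					WithNamespaces("default"))))

	// Server-side apply via the generated typed client (FlowSchemas are cluster-scoped).
	_, err := cs.FlowcontrolV1beta3().FlowSchemas().Apply(ctx, fs, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}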
+ +package v1beta3 + +import ( + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// with apply. +type PriorityLevelConfigurationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` +} + +// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// apply. +func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { + b := &PriorityLevelConfigurationApplyConfiguration{} + b.WithName(name) + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b +} + +// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from +// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a +// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. +// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") +} + +// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") +} + +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { + b := &PriorityLevelConfigurationApplyConfiguration{} + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(priorityLevelConfiguration.Name) + + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSpec(value *PriorityLevelConfigurationSpecApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *PriorityLevelConfigurationStatusApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go new file mode 100644 index 000000000..6e36b6a07 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// with apply. +type PriorityLevelConfigurationConditionApplyConfiguration struct { + Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// apply. +func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { + return &PriorityLevelConfigurationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithReason(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithMessage(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go new file mode 100644 index 000000000..cb827b1e6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// with apply. +type PriorityLevelConfigurationReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// apply. +func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { + return &PriorityLevelConfigurationReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationReferenceApplyConfiguration) WithName(value string) *PriorityLevelConfigurationReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go new file mode 100644 index 000000000..5b0680d91 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// with apply. +type PriorityLevelConfigurationSpecApplyConfiguration struct { + Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` + Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` +} + +// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// apply. +func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { + return &PriorityLevelConfigurationSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithLimited sets the Limited field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limited field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *LimitedPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Limited = value + return b +} + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go similarity index 57% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go index 99c89b09b..0ee9e306e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go @@ -16,28 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1beta3 -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. -type NetworkPolicyStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +type PriorityLevelConfigurationStatusApplyConfiguration struct { + Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. -func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration { - return &NetworkPolicyStatusApplyConfiguration{} +func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { + return &PriorityLevelConfigurationStatusApplyConfiguration{} } // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration { +func (b *PriorityLevelConfigurationStatusApplyConfiguration) WithConditions(values ...*PriorityLevelConfigurationConditionApplyConfiguration) *PriorityLevelConfigurationStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go new file mode 100644 index 000000000..fc86c4443 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// with apply. +type QueuingConfigurationApplyConfiguration struct { + Queues *int32 `json:"queues,omitempty"` + HandSize *int32 `json:"handSize,omitempty"` + QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` +} + +// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// apply. +func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { + return &QueuingConfigurationApplyConfiguration{} +} + +// WithQueues sets the Queues field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queues field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueues(value int32) *QueuingConfigurationApplyConfiguration { + b.Queues = &value + return b +} + +// WithHandSize sets the HandSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HandSize field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithHandSize(value int32) *QueuingConfigurationApplyConfiguration { + b.HandSize = &value + return b +} + +// WithQueueLengthLimit sets the QueueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueueLengthLimit field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueueLengthLimit(value int32) *QueuingConfigurationApplyConfiguration { + b.QueueLengthLimit = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go new file mode 100644 index 000000000..72623ffe4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go @@ -0,0 +1,83 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
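// Editorial sketch (not part of the vendored patch): chaining the PriorityLevelConfiguration
// builders above (PriorityLevelConfigurationSpec, LimitedPriorityLevelConfiguration,
// LimitResponse, QueuingConfiguration), followed by the extract/modify-in-place/apply
// workflow that the ExtractPriorityLevelConfiguration doc comment describes. The clientset
// variable, object name, and field-manager string are assumptions for illustration.
package example

import (
	"context"

	flowcontrolapi "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

func applyPriorityLevel(ctx context.Context, cs kubernetes.Interface) error {
	// Desired state: a limited priority level that queues excess requests.
	plc := flowcontrolac.PriorityLevelConfiguration("example-priority-level").
		WithSpec(flowcontrolac.PriorityLevelConfigurationSpec().
			WithType(flowcontrolapi.PriorityLevelEnablementLimited).
			WithLimited(flowcontrolac.LimitedPriorityLevelConfiguration().
				WithNominalConcurrencyShares(30).
				WithLendablePercent(50).
				WithLimitResponse(flowcontrolac.LimitResponse().
					WithType(flowcontrolapi.LimitResponseTypeQueue).
					WithQueuing(flowcontrolac.QueuingConfiguration().
						WithQueues(64).WithHandSize(6).WithQueueLengthLimit(50)))))

	if _, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().
		Apply(ctx, plc, metav1.ApplyOptions{FieldManager: "example-manager", Force: true}); err != nil {
		return err
	}

	// Extract/modify/apply round trip: recover only the fields owned by this field manager,
	// adjust them, and re-apply.
	live, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().
		Get(ctx, "example-priority-level", metav1.GetOptions{})
	if err != nil {
		return err
	}
	owned, err := flowcontrolac.ExtractPriorityLevelConfiguration(live, "example-manager")
	if err != nil {
		return err
	}
	// Fields are pointer-valued in apply configurations, so guard before mutating.
	if owned.Spec != nil && owned.Spec.Limited != nil {
		owned.Spec.Limited.WithNominalConcurrencyShares(40)
	}
	_, err = cs.FlowcontrolV1beta3().PriorityLevelConfigurations().
		Apply(ctx, owned, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}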
+ +package v1beta3 + +// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// with apply. +type ResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + Resources []string `json:"resources,omitempty"` + ClusterScope *bool `json:"clusterScope,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` +} + +// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// apply. +func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { + return &ResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *ResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *ResourcePolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *ResourcePolicyRuleApplyConfiguration) WithResources(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithClusterScope sets the ClusterScope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterScope field is set to the value of the last call. +func (b *ResourcePolicyRuleApplyConfiguration) WithClusterScope(value bool) *ResourcePolicyRuleApplyConfiguration { + b.ClusterScope = &value + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. 
+func (b *ResourcePolicyRuleApplyConfiguration) WithNamespaces(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go new file mode 100644 index 000000000..e2d6b1b21 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// with apply. +type ServiceAccountSubjectApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// apply. +func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { + return &ServiceAccountSubjectApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithNamespace(value string) *ServiceAccountSubjectApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithName(value string) *ServiceAccountSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go new file mode 100644 index 000000000..f13b8f3ec --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// with apply. +type SubjectApplyConfiguration struct { + Kind *v1beta3.SubjectKind `json:"kind,omitempty"` + User *UserSubjectApplyConfiguration `json:"user,omitempty"` + Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` + ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` +} + +// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// apply. +func Subject() *SubjectApplyConfiguration { + return &SubjectApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithKind(value v1beta3.SubjectKind) *SubjectApplyConfiguration { + b.Kind = &value + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithUser(value *UserSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.User = value + return b +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithGroup(value *GroupSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.Group = value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithServiceAccount(value *ServiceAccountSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.ServiceAccount = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go new file mode 100644 index 000000000..3db3abbc1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. 
DO NOT EDIT. + +package v1beta3 + +// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// with apply. +type UserSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// apply. +func UserSubject() *UserSubjectApplyConfiguration { + return &UserSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *UserSubjectApplyConfiguration) WithName(value string) *UserSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index f8e9eec8f..3ed553662 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,17 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -55,6 +66,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -167,6 +186,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -225,72 +252,65 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string -- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook +- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation map: fields: - - name: admissionReviewVersions - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: clientConfig - type: - namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig - default: {} - - name: failurePolicy + - name: key type: scalar: string - - name: matchPolicy + default: "" + - name: valueExpression type: scalar: string - - name: name + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + map: + fields: + - name: fieldRef type: scalar: string default: "" - - name: namespaceSelector + - name: warning type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: objectSelector + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + map: + fields: + - name: expression type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: reinvocationPolicy + scalar: string + default: "" + - name: name type: scalar: string - - name: rules + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchResources + 
map: + fields: + - name: excludeResourceRules type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations + namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations elementRelationship: atomic - - name: sideEffects - type: - scalar: string - - name: timeoutSeconds - type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration - map: - fields: - - name: apiVersion + - name: matchPolicy type: scalar: string - - name: kind + - name: namespaceSelector type: - scalar: string - - name: metadata + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: webhooks + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: resourceRules type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook - elementRelationship: associative - keys: - - name -- name: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations + namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations map: fields: - name: apiGroups @@ -311,6 +331,12 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: resources type: list: @@ -320,65 +346,64 @@ var schemaYAML = typed.YAMLObject(`types: - name: scope type: scalar: string -- name: io.k8s.api.admissionregistration.v1beta1.ServiceReference + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ParamKind + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ParamRef map: fields: - name: name type: scalar: string - default: "" - name: namespace type: scalar: string - default: "" - - name: path + - name: parameterNotFoundAction type: scalar: string - - name: port + - name: selector type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking map: fields: - - name: admissionReviewVersions + - name: expressionWarnings type: list: elementType: - scalar: string + namedType: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning elementRelationship: atomic - - name: clientConfig - type: - namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig - default: {} - - name: failurePolicy - type: - scalar: string - - name: matchPolicy +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy + map: + fields: + - name: apiVersion type: scalar: string - - name: name + - name: kind type: scalar: string - default: "" - - name: namespaceSelector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: objectSelector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: rules + - name: metadata type: - list: - elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations - elementRelationship: atomic - - name: sideEffects + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - scalar: string - - name: timeoutSeconds + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec + default: {} + - name: status type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding map: fields: - name: apiVersion @@ -391,112 +416,647 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: webhooks + - name: spec type: - list: - elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook - elementRelationship: associative - keys: - - name -- name: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec map: fields: - - name: caBundle + - name: matchResources type: - scalar: string - - name: service + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramRef type: - namedType: io.k8s.api.admissionregistration.v1beta1.ServiceReference - - name: url + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamRef + - name: policyName type: scalar: string -- name: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec map: fields: - - name: apiServerID + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy type: scalar: string - - name: decodableVersions + - name: matchConditions type: list: elementType: - scalar: string + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition elementRelationship: associative - - name: encodingVersion + keys: + - name + - name: matchConstraints type: - scalar: string -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersion - map: - fields: - - name: apiVersion + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramKind type: - scalar: string - - name: kind + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamKind + - name: validations type: - scalar: string - - name: metadata + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Validation + elementRelationship: atomic + - name: variables type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Variable + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions type: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec - default: {} - - name: status + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus - default: {} -- 
name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.TypeChecking +- name: io.k8s.api.admissionregistration.v1alpha1.Validation map: fields: - - name: lastTransitionTime + - name: expression type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + scalar: string + default: "" - name: message type: scalar: string - - name: observedGeneration + - name: messageExpression type: - scalar: numeric + scalar: string - name: reason type: scalar: string - default: "" - - name: status +- name: io.k8s.api.admissionregistration.v1alpha1.Variable + map: + fields: + - name: expression type: scalar: string default: "" - - name: type + - name: name type: scalar: string default: "" -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec - map: - elementType: - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus +- name: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation map: fields: - - name: commonEncodingVersion + - name: key type: scalar: string - - name: conditions + default: "" + - name: valueExpression type: - list: - elementType: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition - elementRelationship: associative - keys: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.MatchResources + map: + fields: + - name: excludeResourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations + elementRelationship: atomic + - name: matchPolicy + type: + scalar: string + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: resourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + map: + fields: + - name: admissionReviewVersions + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: clientConfig + type: + namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + default: {} + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchPolicy + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: reinvocationPolicy + type: + scalar: string + - name: rules + type: + list: + 
elementType: + namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations + elementRelationship: atomic + - name: sideEffects + type: + scalar: string + - name: timeoutSeconds + type: + scalar: numeric +- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: webhooks + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations + map: + fields: + - name: apiGroups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersions + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: operations + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: scope + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ParamKind + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ParamRef + map: + fields: + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: parameterNotFoundAction + type: + scalar: string + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ServiceReference + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: path + type: + scalar: string + - name: port + type: + scalar: numeric +- name: io.k8s.api.admissionregistration.v1beta1.TypeChecking + map: + fields: + - name: expressionWarnings + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec + default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus + default: {} +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources + type: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources + - name: paramRef + type: 
+ namedType: io.k8s.api.admissionregistration.v1beta1.ParamRef + - name: policyName + type: + scalar: string + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec + map: + fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ParamKind + - name: validations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.Validation + elementRelationship: atomic + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.Variable + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration + type: + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1beta1.TypeChecking +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook + map: + fields: + - name: admissionReviewVersions + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: clientConfig + type: + namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + default: {} + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchPolicy + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations + elementRelationship: atomic + - name: sideEffects + type: + scalar: string + - name: timeoutSeconds + type: + scalar: numeric +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: webhooks + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.Validation + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: message + type: + scalar: string + - name: messageExpression + type: + scalar: string + - name: reason + type: + scalar: string +- name: io.k8s.api.admissionregistration.v1beta1.Variable + map: + fields: + - name: expression + type: + 
scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + map: + fields: + - name: caBundle + type: + scalar: string + - name: service + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ServiceReference + - name: url + type: + scalar: string +- name: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion + map: + fields: + - name: apiServerID + type: + scalar: string + - name: decodableVersions + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: encodingVersion + type: + scalar: string + - name: servedVersions + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersion + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec + default: {} + - name: status + type: + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + default: {} +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + map: + fields: + - name: commonEncodingVersion + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition + elementRelationship: associative + keys: - type - name: storageVersions type: @@ -910,6 +1470,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1.StatefulSetOrdinals + map: + fields: + - name: start + type: + scalar: numeric + default: 0 - name: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy map: fields: @@ -925,6 +1492,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: minReadySeconds type: scalar: numeric + - name: ordinals + type: + namedType: io.k8s.api.apps.v1.StatefulSetOrdinals - name: persistentVolumeClaimRetentionPolicy type: namedType: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy @@ -1213,6 +1783,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta1.StatefulSetOrdinals + map: + fields: + - name: start + type: + scalar: numeric + default: 0 - name: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy map: fields: @@ -1228,6 +1805,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: minReadySeconds type: scalar: numeric + - name: ordinals + type: + namedType: io.k8s.api.apps.v1beta1.StatefulSetOrdinals - name: persistentVolumeClaimRetentionPolicy type: namedType: 
io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy @@ -1714,6 +2294,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta2.StatefulSetOrdinals + map: + fields: + - name: start + type: + scalar: numeric + default: 0 - name: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy map: fields: @@ -1729,6 +2316,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: minReadySeconds type: scalar: numeric + - name: ordinals + type: + namedType: io.k8s.api.apps.v1beta2.StatefulSetOrdinals - name: persistentVolumeClaimRetentionPolicy type: namedType: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy @@ -3029,6 +3619,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: backoffLimit type: scalar: numeric + - name: backoffLimitPerIndex + type: + scalar: numeric - name: completionMode type: scalar: string @@ -3038,9 +3631,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: manualSelector type: scalar: boolean + - name: maxFailedIndexes + type: + scalar: numeric - name: parallelism type: scalar: numeric + - name: podFailurePolicy + type: + namedType: io.k8s.api.batch.v1.PodFailurePolicy + - name: podReplacementPolicy + type: + scalar: string - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -3075,6 +3677,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: failed type: scalar: numeric + - name: failedIndexes + type: + scalar: string - name: ready type: scalar: numeric @@ -3083,21 +3688,76 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: succeeded type: - scalar: numeric - - name: uncountedTerminatedPods + scalar: numeric + - name: terminating + type: + scalar: numeric + - name: uncountedTerminatedPods + type: + namedType: io.k8s.api.batch.v1.UncountedTerminatedPods +- name: io.k8s.api.batch.v1.JobTemplateSpec + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.batch.v1.JobSpec + default: {} +- name: io.k8s.api.batch.v1.PodFailurePolicy + map: + fields: + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.batch.v1.PodFailurePolicyRule + elementRelationship: atomic +- name: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement + map: + fields: + - name: containerName + type: + scalar: string + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: numeric + elementRelationship: associative +- name: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern + map: + fields: + - name: status + type: + scalar: string + default: "" + - name: type type: - namedType: io.k8s.api.batch.v1.UncountedTerminatedPods -- name: io.k8s.api.batch.v1.JobTemplateSpec + scalar: string + default: "" +- name: io.k8s.api.batch.v1.PodFailurePolicyRule map: fields: - - name: metadata + - name: action type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + scalar: string + default: "" + - name: onExitCodes type: - namedType: io.k8s.api.batch.v1.JobSpec - default: {} + namedType: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement + - name: onPodConditions + type: + list: + elementType: + namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern + elementRelationship: atomic - name: io.k8s.api.batch.v1.UncountedTerminatedPods map: fields: @@ -3288,6 +3948,33 @@ 
var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundle + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + default: {} +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + map: + fields: + - name: signerName + type: + scalar: string + - name: trustBundle + type: + scalar: string + default: "" - name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest map: fields: @@ -3565,6 +4252,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + - name: nodeExpandSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference - name: nodePublishSecretRef type: namedType: io.k8s.api.core.v1.SecretReference @@ -3699,6 +4389,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ClaimSource + map: + fields: + - name: resourceClaimName + type: + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string - name: io.k8s.api.core.v1.ClientIPConfig map: fields: @@ -3903,10 +4602,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements default: {} + - name: restartPolicy + type: + scalar: string - name: securityContext type: namedType: io.k8s.api.core.v1.SecurityContext @@ -3979,6 +4687,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ContainerState map: fields: @@ -4037,6 +4756,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ContainerStatus map: fields: + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: containerID type: scalar: string @@ -4060,6 +4784,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: boolean default: false + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartCount type: scalar: numeric @@ -4295,10 +5022,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements default: {} + - name: restartPolicy + type: + scalar: string - name: securityContext type: namedType: io.k8s.api.core.v1.SecurityContext @@ -4629,6 +5365,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string +- name: io.k8s.api.core.v1.HostIP + map: + fields: + - name: ip + type: + scalar: string - name: io.k8s.api.core.v1.HostPathVolumeSource map: fields: @@ -5327,7 +6069,7 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.core.v1.TypedLocalObjectReference - name: dataSourceRef type: - namedType: 
io.k8s.api.core.v1.TypedLocalObjectReference + namedType: io.k8s.api.core.v1.TypedObjectReference - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -5353,6 +6095,12 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: allocatedResourceStatuses + type: + map: + elementType: + scalar: string + elementRelationship: separable - name: allocatedResources type: map: @@ -5374,9 +6122,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: phase type: scalar: string - - name: resizeStatus - type: - scalar: string - name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate map: fields: @@ -5430,6 +6175,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: claimRef type: namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: separable - name: csi type: namedType: io.k8s.api.core.v1.CSIPersistentVolumeSource @@ -5502,6 +6248,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.PersistentVolumeStatus map: fields: + - name: lastPhaseTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: message type: scalar: string @@ -5666,6 +6415,34 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.PodResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: source + type: + namedType: io.k8s.api.core.v1.ClaimSource + default: {} +- name: io.k8s.api.core.v1.PodResourceClaimStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceClaimName + type: + scalar: string +- name: io.k8s.api.core.v1.PodSchedulingGate + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.PodSecurityContext map: fields: @@ -5759,6 +6536,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPID type: scalar: boolean + - name: hostUsers + type: + scalar: boolean - name: hostname type: scalar: string @@ -5810,6 +6590,14 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.PodReadinessGate elementRelationship: atomic + - name: resourceClaims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodResourceClaim + elementRelationship: associative + keys: + - name - name: restartPolicy type: scalar: string @@ -5819,6 +6607,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: schedulerName type: scalar: string + - name: schedulingGates + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodSchedulingGate + elementRelationship: associative + keys: + - name - name: securityContext type: namedType: io.k8s.api.core.v1.PodSecurityContext @@ -5889,6 +6685,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostIP type: scalar: string + - name: hostIPs + type: + list: + elementType: + namedType: io.k8s.api.core.v1.HostIP + elementRelationship: atomic - name: initContainerStatuses type: list: @@ -5921,6 +6723,17 @@ var schemaYAML = typed.YAMLObject(`types: - name: reason type: scalar: string + - name: resize + type: + scalar: string + - name: resourceClaimStatuses + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodResourceClaimStatus + elementRelationship: associative + keys: + - name - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -6207,6 +7020,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + 
default: "" - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -6276,6 +7096,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ResourceRequirements map: fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name - name: limits type: map: @@ -6855,6 +7683,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: maxSkew type: scalar: numeric @@ -6862,6 +7696,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: minDomains type: scalar: numeric + - name: nodeAffinityPolicy + type: + scalar: string + - name: nodeTaintsPolicy + type: + scalar: string - name: topologyKey type: scalar: string @@ -6885,6 +7725,23 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.TypedObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string - name: io.k8s.api.core.v1.Volume map: fields: @@ -7426,29 +8283,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -7684,18 +8518,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: @@ -7718,28 +8540,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.extensions.v1beta1.IPBlock map: fields: @@ -7777,16 +8577,54 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.extensions.v1beta1.IngressBackend map: fields: - - name: resource + - name: resource + type: + namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: serviceName + type: + scalar: string + - name: servicePort + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} +- name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress + map: + fields: + - name: hostname + type: + scalar: string + - name: ip + type: + scalar: string + - name: ports + type: + list: + elementType: 
+ namedType: io.k8s.api.extensions.v1beta1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerStatus + map: + fields: + - name: ingress type: - namedType: io.k8s.api.core.v1.TypedLocalObjectReference - - name: serviceName + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IngressPortStatus + map: + fields: + - name: error type: scalar: string - - name: servicePort + - name: port type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} + scalar: numeric + default: 0 + - name: protocol + type: + scalar: string + default: "" - name: io.k8s.api.extensions.v1beta1.IngressRule map: fields: @@ -7822,7 +8660,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: loadBalancer type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus + namedType: io.k8s.api.extensions.v1beta1.IngressLoadBalancerStatus default: {} - name: io.k8s.api.extensions.v1beta1.IngressTLS map: @@ -7853,10 +8691,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.extensions.v1beta1.NetworkPolicySpec default: {} - - name: status - type: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus - default: {} - name: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule map: fields: @@ -7936,18 +8770,132 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus +- name: io.k8s.api.extensions.v1beta1.ReplicaSet + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + default: {} + - name: status + type: + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + default: {} +- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + map: + fields: + - name: minReadySeconds + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} +- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus map: fields: + - name: availableReplicas + type: + scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition elementRelationship: associative keys: - type -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy + - name: fullyLabeledReplicas + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + default: 0 +- name: io.k8s.api.extensions.v1beta1.RollbackConfig + map: + fields: + - name: revision + type: + scalar: numeric +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet + map: + fields: + - 
name: maxSurge + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment + map: + fields: + - name: maxSurge + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration + map: + fields: + - name: lendablePercent + type: + scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + map: + fields: + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema map: fields: - name: apiVersion @@ -7962,121 +8910,138 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + default: {} + - name: status + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition map: fields: - - name: allowPrivilegeEscalation + - name: lastTransitionTime type: - scalar: boolean - - name: allowedCSIDrivers + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities + scalar: string + - name: reason type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes + scalar: string + - name: status type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths + scalar: string + - name: type type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes + scalar: string +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + map: + fields: + - name: distinguisherMethod type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + - name: matchingPrecedence + type: + scalar: numeric + default: 0 + - name: priorityLevelConfiguration + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + default: {} + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects elementRelationship: atomic - - name: defaultAddCapabilities +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + map: + fields: + - name: conditions type: list: elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + map: + fields: + - name: name type: - scalar: boolean - - name: forbiddenSysctls + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + map: + fields: + - name: queuing + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + - name: 
type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: queuing + discriminatorValue: Queuing +- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + map: + fields: + - name: assuredConcurrencyShares + type: + scalar: numeric + default: 0 + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent + type: + scalar: numeric + - name: limitResponse + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + default: {} +- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + map: + fields: + - name: nonResourceURLs type: list: elementType: scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID + elementRelationship: associative + - name: verbs type: - scalar: boolean - - name: hostPorts + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + map: + fields: + - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange + namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities + - name: resourceRules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes + - name: subjects type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.Subject elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.ReplicaSet +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8091,13 +9056,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -8113,146 +9078,152 @@ var schemaYAML = typed.YAMLObject(`types: - name: status type: scalar: string - default: "" - name: type type: scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference map: fields: - - name: minReadySeconds + - name: name type: - scalar: numeric - - name: replicas + scalar: string + default: "" +- name: 
io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + map: + fields: + - name: exempt type: - scalar: numeric - - name: selector + namedType: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration + - name: limited type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + - name: type type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: exempt + discriminatorValue: Exempt + - fieldName: limited + discriminatorValue: Limited +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus map: fields: - - name: availableReplicas - type: - scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type - - name: fullyLabeledReplicas - type: - scalar: numeric - - name: observedGeneration +- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + map: + fields: + - name: handSize type: scalar: numeric - - name: readyReplicas + default: 0 + - name: queueLengthLimit type: scalar: numeric - - name: replicas + default: 0 + - name: queues type: scalar: numeric default: 0 -- name: io.k8s.api.extensions.v1beta1.RollbackConfig +- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule map: fields: - - name: revision + - name: apiGroups type: - scalar: numeric -- name: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet - map: - fields: - - name: maxSurge + list: + elementType: + scalar: string + elementRelationship: associative + - name: clusterScope type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + scalar: boolean + - name: namespaces type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment - map: - fields: - - name: maxSurge + list: + elementType: + scalar: string + elementRelationship: associative + - name: resources type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + list: + elementType: + scalar: string + elementRelationship: associative + - name: verbs type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject map: fields: - - name: ranges + - name: name type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + default: "" + - name: namespace type: scalar: string default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions +- name: io.k8s.api.flowcontrol.v1alpha1.Subject map: fields: - - name: ranges + - name: group type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + - name: kind type: scalar: string default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames + - name: serviceAccount type: - list: - elementType: - scalar: string - 
elementRelationship: atomic - - name: defaultRuntimeClassName + namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + - name: user type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions + namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + unions: + - discriminator: kind + fields: + - fieldName: group + discriminatorValue: Group + - fieldName: serviceAccount + discriminatorValue: ServiceAccount + - fieldName: user + discriminatorValue: User +- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject map: fields: - - name: rule + - name: name type: scalar: string default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions +- name: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration map: fields: - - name: ranges + - name: lendablePercent type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: numeric + - name: nominalConcurrencyShares type: - scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema map: fields: - name: apiVersion @@ -8267,13 +9238,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition map: fields: - name: lastTransitionTime @@ -8292,50 +9263,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse map: fields: - name: queuing type: - namedType: 
io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration - name: type type: scalar: string @@ -8345,18 +9316,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares type: scalar: numeric default: 0 + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent + type: + scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta1.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8371,28 +9348,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.Subject + namedType: io.k8s.api.flowcontrol.v1beta1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8407,13 +9384,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -8432,19 +9409,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -8452,20 +9432,22 @@ var schemaYAML = typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: 
limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration map: fields: - name: handSize @@ -8480,7 +9462,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule map: fields: - name: apiGroups @@ -8510,7 +9492,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject map: fields: - name: name @@ -8521,22 +9503,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.Subject +- name: io.k8s.api.flowcontrol.v1beta1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject unions: - discriminator: kind fields: @@ -8546,21 +9528,30 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta1.UserSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration + map: + fields: + - name: lendablePercent + type: + scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema map: fields: - name: apiVersion @@ -8575,13 +9566,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition map: fields: - name: lastTransitionTime @@ -8600,50 +9591,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod + 
namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration - name: type type: scalar: string @@ -8653,18 +9644,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares type: scalar: numeric default: 0 + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent + type: + scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1beta1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta2.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8679,28 +9676,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.Subject + namedType: io.k8s.api.flowcontrol.v1beta2.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8715,13 +9712,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus + namedType: 
io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -8740,19 +9737,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -8760,20 +9760,22 @@ var schemaYAML = typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration map: fields: - name: handSize @@ -8788,7 +9790,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule map: fields: - name: apiGroups @@ -8818,7 +9820,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject map: fields: - name: name @@ -8829,22 +9831,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.Subject +- name: io.k8s.api.flowcontrol.v1beta2.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject unions: - discriminator: kind fields: @@ -8854,21 +9856,30 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1beta1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta2.UserSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration + map: + fields: + - name: lendablePercent + type: + 
scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchema map: fields: - name: apiVersion @@ -8883,13 +9894,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition map: fields: - name: lastTransitionTime @@ -8908,50 +9919,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta3.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta3.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration - name: type type: scalar: string @@ -8961,18 +9972,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration map: fields: - - name: assuredConcurrencyShares + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent type: scalar: numeric - default: 0 - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1beta2.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta3.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + - name: nominalConcurrencyShares + type: + scalar: numeric + default: 0 +- name: io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8987,28 +10004,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: 
io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.Subject + namedType: io.k8s.api.flowcontrol.v1beta3.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -9023,13 +10040,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -9048,19 +10065,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -9068,20 +10088,22 @@ var schemaYAML = typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration map: fields: - name: handSize @@ -9096,7 +10118,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule map: fields: - name: apiGroups @@ -9126,7 +10148,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject +- name: 
io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject map: fields: - name: name @@ -9137,22 +10159,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.Subject +- name: io.k8s.api.flowcontrol.v1beta3.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta3.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta3.UserSubject unions: - discriminator: kind fields: @@ -9162,7 +10184,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1beta2.UserSubject +- name: io.k8s.api.flowcontrol.v1beta3.UserSubject map: fields: - name: name @@ -9339,6 +10361,44 @@ var schemaYAML = typed.YAMLObject(`types: - name: parameters type: namedType: io.k8s.api.networking.v1.IngressClassParametersReference +- name: io.k8s.api.networking.v1.IngressLoadBalancerIngress + map: + fields: + - name: hostname + type: + scalar: string + - name: ip + type: + scalar: string + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.networking.v1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.networking.v1.IngressLoadBalancerStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: io.k8s.api.networking.v1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.networking.v1.IngressPortStatus + map: + fields: + - name: error + type: + scalar: string + - name: port + type: + scalar: numeric + default: 0 + - name: protocol + type: + scalar: string + default: "" - name: io.k8s.api.networking.v1.IngressRule map: fields: @@ -9385,7 +10445,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: loadBalancer type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus + namedType: io.k8s.api.networking.v1.IngressLoadBalancerStatus default: {} - name: io.k8s.api.networking.v1.IngressTLS map: @@ -9416,10 +10476,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.networking.v1.NetworkPolicySpec default: {} - - name: status - type: - namedType: io.k8s.api.networking.v1.NetworkPolicyStatus - default: {} - name: io.k8s.api.networking.v1.NetworkPolicyEgressRule map: fields: @@ -9499,26 +10555,91 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.networking.v1.NetworkPolicyStatus +- name: io.k8s.api.networking.v1.ServiceBackendPort + map: + fields: + - name: name + type: + scalar: string + - name: number + type: + scalar: numeric +- name: io.k8s.api.networking.v1alpha1.ClusterCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + map: + fields: + - name: ipv4 + type: + scalar: string + default: "" + - name: ipv6 + type: + scalar: string + default: "" + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector + - 
name: perNodeHostBits + type: + scalar: numeric + default: 0 +- name: io.k8s.api.networking.v1alpha1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec map: fields: - - name: conditions + - name: parentRef type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type -- name: io.k8s.api.networking.v1.ServiceBackendPort + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference map: fields: + - name: group + type: + scalar: string - name: name type: scalar: string - - name: number + - name: namespace type: - scalar: numeric + scalar: string + - name: resource + type: + scalar: string + - name: uid + type: + scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -9621,6 +10742,44 @@ var schemaYAML = typed.YAMLObject(`types: - name: parameters type: namedType: io.k8s.api.networking.v1beta1.IngressClassParametersReference +- name: io.k8s.api.networking.v1beta1.IngressLoadBalancerIngress + map: + fields: + - name: hostname + type: + scalar: string + - name: ip + type: + scalar: string + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.networking.v1beta1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IngressLoadBalancerStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: io.k8s.api.networking.v1beta1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IngressPortStatus + map: + fields: + - name: error + type: + scalar: string + - name: port + type: + scalar: numeric + default: 0 + - name: protocol + type: + scalar: string + default: "" - name: io.k8s.api.networking.v1beta1.IngressRule map: fields: @@ -9656,7 +10815,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: loadBalancer type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus + namedType: io.k8s.api.networking.v1beta1.IngressLoadBalancerStatus default: {} - name: io.k8s.api.networking.v1beta1.IngressTLS map: @@ -9864,6 +11023,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: unhealthyPodEvictionPolicy + type: + scalar: string - name: io.k8s.api.policy.v1.PodDisruptionBudgetStatus map: fields: @@ -10005,6 +11167,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: unhealthyPodEvictionPolicy + type: + scalar: string - name: io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus map: fields: @@ -10714,6 +11879,240 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string +- name: io.k8s.api.resource.v1alpha2.AllocationResult + map: + fields: + - name: availableOnNodes + type: + namedType: io.k8s.api.core.v1.NodeSelector + - name: resourceHandles + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + elementRelationship: atomic + - name: shareable + type: + scalar: boolean +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext + map: + fields: + - name: apiVersion + type: + scalar: 
string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus + default: {} +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + map: + fields: + - name: potentialNodes + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: selectedNode + type: + scalar: string +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus + map: + fields: + - name: resourceClaims + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus + elementRelationship: associative + keys: + - name +- name: io.k8s.api.resource.v1alpha2.ResourceClaim + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus + default: {} +- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus + map: + fields: + - name: name + type: + scalar: string + - name: unsuitableNodes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + map: + fields: + - name: allocationMode + type: + scalar: string + - name: parametersRef + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference + - name: resourceClassName + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus + map: + fields: + - name: allocation + type: + namedType: io.k8s.api.resource.v1alpha2.AllocationResult + - name: deallocationRequested + type: + scalar: boolean + - name: driverName + type: + scalar: string + - name: reservedFor + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference + elementRelationship: associative + keys: + - uid +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec + default: {} +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + default: {} +- name: io.k8s.api.resource.v1alpha2.ResourceClass + map: + fields: + - name: 
apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parametersRef + type: + namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference + - name: suitableNodes + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string +- name: io.k8s.api.resource.v1alpha2.ResourceHandle + map: + fields: + - name: data + type: + scalar: string + - name: driverName + type: + scalar: string - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: @@ -10824,6 +12223,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: requiresRepublish type: scalar: boolean + - name: seLinuxMount + type: + scalar: boolean - name: storageCapacity type: scalar: boolean @@ -11176,6 +12578,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: requiresRepublish type: scalar: boolean + - name: seLinuxMount + type: + scalar: boolean - name: storageCapacity type: scalar: boolean @@ -11530,9 +12935,6 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string - - name: clusterName - type: - scalar: string - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go new file mode 100644 index 000000000..444275a12 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. +type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. 
+func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..8e01a301a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. +type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. +func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. 
+func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go new file mode 100644 index 000000000..82b5babd9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. 
+func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go index dd8b25d83..7131bf8d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go @@ -18,14 +18,10 @@ limitations under the License. package v1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - // IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } // IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 101510e45..409507310 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -32,8 +32,7 @@ import ( type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } // NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with @@ -248,11 +247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. 
-func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go new file mode 100644 index 000000000..ad0eae919 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go @@ -0,0 +1,247 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use +// with apply. +type ClusterCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with +// apply. +func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { + b := &ClusterCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b +} + +// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from +// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a +// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. +// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "") +} + +// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "status") +} + +func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { + b := &ClusterCIDRApplyConfiguration{} + err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterCIDR.Name) + + b.WithKind("ClusterCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go new file mode 100644 index 000000000..8d5fa406b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use +// with apply. +type ClusterCIDRSpecApplyConfiguration struct { + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` + IPv4 *string `json:"ipv4,omitempty"` + IPv6 *string `json:"ipv6,omitempty"` +} + +// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with +// apply. +func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { + return &ClusterCIDRSpecApplyConfiguration{} +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PerNodeHostBits field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { + b.PerNodeHostBits = &value + return b +} + +// WithIPv4 sets the IPv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv4 field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv4 = &value + return b +} + +// WithIPv6 sets the IPv6 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6 field is set to the value of the last call. 
+func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv6 = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go similarity index 66% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index c70906cfa..da6822111 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// IPAddress constructs an declarative configuration of the IPAddress type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. 
+// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractIPAddressStatus is the same as ExtractIPAddress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractIPAddressStatus(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1alpha1.IPAddress"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(iPAddress.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { b.Kind = &value return b } @@ -91,7 +91,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +99,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +108,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +117,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +126,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +135,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +144,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +153,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +162,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +171,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +181,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +196,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +210,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +224,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +232,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +241,7 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { b.Spec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go new file mode 100644 index 000000000..064963d69 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go new file mode 100644 index 000000000..14b10b19f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// with apply. 
+type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go new file mode 100644 index 000000000..20bf63780 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
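Usage sketch (editorial note, not part of the patch): the new networking/v1alpha1 apply configurations compose through the generated "With" builders shown above. The snippet below assumes the IPAddress(name) constructor generated alongside the Extract helpers in this hunk; IPAddressSpec, ParentReference and their With* methods all appear directly above.

package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
)

func main() {
	// Assumed: IPAddress(name) is the generated constructor for
	// IPAddressApplyConfiguration (only Extract*/With* appear in this hunk).
	addr := networkingv1alpha1.IPAddress("192.0.2.10").
		WithLabels(map[string]string{"app": "demo"}).
		WithSpec(networkingv1alpha1.IPAddressSpec().
			WithParentRef(networkingv1alpha1.ParentReference().
				WithGroup("").
				WithResource("services").
				WithNamespace("default").
				WithName("demo-svc")))
	// Kind and Name are populated via the embedded TypeMeta/ObjectMeta apply configurations.
	fmt.Println(*addr.Kind, *addr.Name)
}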
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. +type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..e16dd2363 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. 
+type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. +func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go new file mode 100644 index 000000000..083653797 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. 
+func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go index 941769594..faa7e2446 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go @@ -18,14 +18,10 @@ limitations under the License. package v1beta1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - // IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } // IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index e2f49f528..67d9ba6bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + policyv1 "k8s.io/api/policy/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -26,9 +27,10 @@ import ( // PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. 
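Usage sketch (editorial note, not part of the patch): with IngressStatus.LoadBalancer now typed as *IngressLoadBalancerStatusApplyConfiguration, a networking/v1beta1 status subtree can be assembled from the new builders above. Only the core/v1 ProtocolTCP constant is assumed from outside this hunk.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	netv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func main() {
	// Chain the new load-balancer builders into the ingress status.
	status := netv1beta1.IngressStatus().
		WithLoadBalancer(netv1beta1.IngressLoadBalancerStatus().
			WithIngress(netv1beta1.IngressLoadBalancerIngress().
				WithIP("203.0.113.10").
				WithHostname("lb.example.com").
				WithPorts(netv1beta1.IngressPortStatus().
					WithPort(443).
					WithProtocol(corev1.ProtocolTCP)))) // ProtocolTCP assumed from k8s.io/api/core/v1
	fmt.Println(*status.LoadBalancer.Ingress[0].IP)
}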
type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } // PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with @@ -60,3 +62,11 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int b.MaxUnavailable = &value return b } + +// WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. +func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value policyv1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { + b.UnhealthyPodEvictionPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index b5d17d3fe..0ba3ea1c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + v1beta1 "k8s.io/api/policy/v1beta1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -26,9 +27,10 @@ import ( // PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } // PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with @@ -60,3 +62,11 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int b.MaxUnavailable = &value return b } + +// WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. 
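Usage sketch (editorial note, not part of the patch): the new unhealthyPodEvictionPolicy field is set through the generated builder added above. The PodDisruptionBudgetSpec() constructor and the policy/v1 AlwaysAllow constant are assumed from outside this hunk.

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	policyv1apply "k8s.io/client-go/applyconfigurations/policy/v1"
)

func main() {
	// Assumed: PodDisruptionBudgetSpec() constructor and the AlwaysAllow
	// UnhealthyPodEvictionPolicyType constant from k8s.io/api/policy/v1.
	spec := policyv1apply.PodDisruptionBudgetSpec().
		WithUnhealthyPodEvictionPolicy(policyv1.AlwaysAllow)
	fmt.Println(*spec.UnhealthyPodEvictionPolicy)
}

The v1beta1 variant shown in the following hunk works the same way, with the constant taken from k8s.io/api/policy/v1beta1 instead.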
+func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value v1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { + b.UnhealthyPodEvictionPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go new file mode 100644 index 000000000..bc6078aa9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` + AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` + Shareable *bool `json:"shareable,omitempty"` +} + +// AllocationResultApplyConfiguration constructs an declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceHandles field. +func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceHandles") + } + b.ResourceHandles = append(b.ResourceHandles, *values[i]) + } + return b +} + +// WithAvailableOnNodes sets the AvailableOnNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableOnNodes field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithAvailableOnNodes(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.AvailableOnNodes = value + return b +} + +// WithShareable sets the Shareable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Shareable field is set to the value of the last call. 
+func (b *AllocationResultApplyConfiguration) WithShareable(value bool) *AllocationResultApplyConfiguration { + b.Shareable = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..1dfb6ff97 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// with apply. +type PodSchedulingContextApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// apply. +func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { + b := &PodSchedulingContextApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. +// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
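Usage sketch (editorial note, not part of the patch): AllocationResult composes with the core/v1 NodeSelector apply configuration. The NodeSelector() constructor from k8s.io/client-go/applyconfigurations/core/v1 is assumed; ResourceHandles is omitted here because its builder is defined elsewhere in the patch.

package main

import (
	"fmt"

	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func main() {
	// Assumed: corev1apply.NodeSelector() is the generated constructor for
	// NodeSelectorApplyConfiguration (not part of this hunk).
	result := resourceapply.AllocationResult().
		WithAvailableOnNodes(corev1apply.NodeSelector()).
		WithShareable(true)
	fmt.Println(*result.Shareable)
}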
+func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +} + +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +} + +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { + b := &PodSchedulingContextApplyConfiguration{} + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(podSchedulingContext.Name) + b.WithNamespace(podSchedulingContext.Namespace) + + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go new file mode 100644 index 000000000..c95d3295e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use +// with apply. +type PodSchedulingContextSpecApplyConfiguration struct { + SelectedNode *string `json:"selectedNode,omitempty"` + PotentialNodes []string `json:"potentialNodes,omitempty"` +} + +// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with +// apply. +func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { + return &PodSchedulingContextSpecApplyConfiguration{} +} + +// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectedNode field is set to the value of the last call. +func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { + b.SelectedNode = &value + return b +} + +// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PotentialNodes field. +func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { + for i := range values { + b.PotentialNodes = append(b.PotentialNodes, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go new file mode 100644 index 000000000..a8b10b9a0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
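Usage sketch (editorial note, not part of the patch): the PodSchedulingContext builders chain in the usual way; every constructor and With* method used below appears in the hunks above.

package main

import (
	"fmt"

	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func main() {
	// Build a namespaced PodSchedulingContext apply configuration with a spec.
	sched := resourceapply.PodSchedulingContext("my-pod", "default").
		WithSpec(resourceapply.PodSchedulingContextSpec().
			WithSelectedNode("node-a").
			WithPotentialNodes("node-a", "node-b"))
	fmt.Println(*sched.Name, *sched.Spec.SelectedNode)
}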
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use +// with apply. +type PodSchedulingContextStatusApplyConfiguration struct { + ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` +} + +// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with +// apply. +func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { + return &PodSchedulingContextStatusApplyConfiguration{} +} + +// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaims field. +func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaims") + } + b.ResourceClaims = append(b.ResourceClaims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go new file mode 100644 index 000000000..6c219f837 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// with apply. 
+type ResourceClaimApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` +} + +// ResourceClaim constructs an declarative configuration of the ResourceClaim type for use with +// apply. +func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { + b := &ResourceClaimApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractResourceClaim extracts the applied configuration owned by fieldManager from +// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a +// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API. +// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "") +} + +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "status") +} + +func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { + b := &ResourceClaimApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaim.Name) + b.WithNamespace(resourceClaim.Namespace) + + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go new file mode 100644 index 000000000..41bb9e9a1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ResourceClaimConsumerReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimConsumerReference type for use +// with apply. 
+type ResourceClaimConsumerReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ResourceClaimConsumerReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimConsumerReference type for use with +// apply. +func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { + return &ResourceClaimConsumerReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go new file mode 100644 index 000000000..27820ede6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha2 + +// ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use +// with apply. +type ResourceClaimParametersReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ResourceClaimParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimParametersReference type for use with +// apply. +func ResourceClaimParametersReference() *ResourceClaimParametersReferenceApplyConfiguration { + return &ResourceClaimParametersReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimParametersReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClaimParametersReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimParametersReferenceApplyConfiguration) WithName(value string) *ResourceClaimParametersReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go new file mode 100644 index 000000000..e74679aed --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use +// with apply. 
+type ResourceClaimSchedulingStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` +} + +// ResourceClaimSchedulingStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimSchedulingStatus type for use with +// apply. +func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { + return &ResourceClaimSchedulingStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field. +func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration { + for i := range values { + b.UnsuitableNodes = append(b.UnsuitableNodes, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go new file mode 100644 index 000000000..0c73e64e9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" +) + +// ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + ResourceClassName *string `json:"resourceClassName,omitempty"` + ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` + AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with +// apply. +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithResourceClassName sets the ResourceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClassName field is set to the value of the last call. 
+func (b *ResourceClaimSpecApplyConfiguration) WithResourceClassName(value string) *ResourceClaimSpecApplyConfiguration { + b.ResourceClassName = &value + return b +} + +// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParametersRef field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.ParametersRef = value + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { + b.AllocationMode = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go new file mode 100644 index 000000000..c6fa61090 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use +// with apply. +type ResourceClaimStatusApplyConfiguration struct { + DriverName *string `json:"driverName,omitempty"` + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + DeallocationRequested *bool `json:"deallocationRequested,omitempty"` +} + +// ResourceClaimStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimStatus type for use with +// apply. +func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { + return &ResourceClaimStatusApplyConfiguration{} +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. 
+func (b *ResourceClaimStatusApplyConfiguration) WithDriverName(value string) *ResourceClaimStatusApplyConfiguration { + b.DriverName = &value + return b +} + +// WithAllocation sets the Allocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allocation field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + b.Allocation = value + return b +} + +// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ReservedFor field. +func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReservedFor") + } + b.ReservedFor = append(b.ReservedFor, *values[i]) + } + return b +} + +// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeallocationRequested field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration { + b.DeallocationRequested = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go new file mode 100644 index 000000000..fc2209b8f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go @@ -0,0 +1,249 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateApplyConfiguration represents an declarative configuration of the ResourceClaimTemplate type for use +// with apply. 
+type ResourceClaimTemplateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplate constructs an declarative configuration of the ResourceClaimTemplate type for use with +// apply. +func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { + b := &ResourceClaimTemplateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from +// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a +// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API. +// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") +} + +// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") +} + +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { + b := &ResourceClaimTemplateApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaimTemplate.Name) + b.WithNamespace(resourceClaimTemplate.Namespace) + + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go new file mode 100644 index 000000000..2f38ea036 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go @@ -0,0 +1,188 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateSpecApplyConfiguration represents an declarative configuration of the ResourceClaimTemplateSpec type for use +// with apply. +type ResourceClaimTemplateSpecApplyConfiguration struct { + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplateSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimTemplateSpec type for use with +// apply. 
+func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { + return &ResourceClaimTemplateSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.Spec = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go new file mode 100644 index 000000000..724c9e88e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go @@ -0,0 +1,266 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClassApplyConfiguration represents an declarative configuration of the ResourceClass type for use +// with apply. 
+type ResourceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + ParametersRef *ResourceClassParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` + SuitableNodes *corev1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` +} + +// ResourceClass constructs an declarative configuration of the ResourceClass type for use with +// apply. +func ResourceClass(name string) *ResourceClassApplyConfiguration { + b := &ResourceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("ResourceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractResourceClass extracts the applied configuration owned by fieldManager from +// resourceClass. If no managedFields are found in resourceClass for fieldManager, a +// ResourceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClass must be a unmodified ResourceClass API object that was retrieved from the Kubernetes API. +// ExtractResourceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { + return extractResourceClass(resourceClass, fieldManager, "") +} + +// ExtractResourceClassStatus is the same as ExtractResourceClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { + return extractResourceClass(resourceClass, fieldManager, "status") +} + +func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { + b := &ResourceClassApplyConfiguration{} + err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClass.Name) + + b.WithKind("ResourceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *ResourceClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *ResourceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithDriverName(value string) *ResourceClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParametersRef field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithParametersRef(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassApplyConfiguration { + b.ParametersRef = value + return b +} + +// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuitableNodes field is set to the value of the last call. +func (b *ResourceClassApplyConfiguration) WithSuitableNodes(value *corev1.NodeSelectorApplyConfiguration) *ResourceClassApplyConfiguration { + b.SuitableNodes = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go new file mode 100644 index 000000000..d67e4d397 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use +// with apply. +type ResourceClassParametersReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// ResourceClassParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClassParametersReference type for use with +// apply. +func ResourceClassParametersReference() *ResourceClassParametersReferenceApplyConfiguration { + return &ResourceClassParametersReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClassParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClassParametersReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClassParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClassParametersReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClassParametersReferenceApplyConfiguration) WithName(value string) *ResourceClassParametersReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClassParametersReferenceApplyConfiguration) WithNamespace(value string) *ResourceClassParametersReferenceApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go new file mode 100644 index 000000000..028cbaa1a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
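
The builders above compose like the rest of client-go's apply configurations: construct, chain With* calls, then feed the result to a typed server-side Apply. A minimal sketch, assuming the generated ResourceClass(name) constructor from earlier in this file and a typed clientset cs exposing ResourceV1alpha2() (neither appears in this hunk):

    // import (
    //     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    //     resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    // )
    rc := resourcev1alpha2.ResourceClass("example-class").
        WithLabels(map[string]string{"team": "dra"}).
        WithDriverName("resources.example.com").
        WithParametersRef(resourcev1alpha2.ResourceClassParametersReference().
            WithKind("ClassParameters").
            WithName("default").
            WithNamespace("dra-system"))
    // A field manager is mandatory for server-side apply.
    _, err := cs.ResourceV1alpha2().ResourceClasses().
        Apply(ctx, rc, metav1.ApplyOptions{FieldManager: "example-controller"})
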
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use +// with apply. +type ResourceHandleApplyConfiguration struct { + DriverName *string `json:"driverName,omitempty"` + Data *string `json:"data,omitempty"` +} + +// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with +// apply. +func ResourceHandle() *ResourceHandleApplyConfiguration { + return &ResourceHandleApplyConfiguration{} +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { + b.DriverName = &value + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { + b.Data = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index 1dc17ce96..a1ef00656 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -32,6 +32,7 @@ type CSIDriverSpecApplyConfiguration struct { FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } // CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with @@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI b.RequiresRepublish = &value return b } + +// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxMount field is set to the value of the last call. 
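
The seLinuxMount field added to the storage/v1 and storage/v1beta1 apply configurations (its v1 accessor follows immediately below) is set the same way as the other spec fields. A rough sketch, assuming the existing CSIDriver and CSIDriverSpec constructors and a clientset cs:

    // import storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
    drv := storagev1ac.CSIDriver("example.csi.vendor.io").
        WithSpec(storagev1ac.CSIDriverSpec().
            WithSELinuxMount(true)) // driver handles SELinux "-o context" mount options
    _, err := cs.StorageV1().CSIDrivers().
        Apply(ctx, drv, metav1.ApplyOptions{FieldManager: "csi-installer"})
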
+func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration { + b.SELinuxMount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 1d943cbff..6097a615b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -32,6 +32,7 @@ type CSIDriverSpecApplyConfiguration struct { FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } // CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with @@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI b.RequiresRepublish = &value return b } + +// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxMount field is set to the value of the last call. +func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration { + b.SELinuxMount = &value + return b +} diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go new file mode 100644 index 000000000..f72c42051 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go @@ -0,0 +1,156 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + apidiscovery "k8s.io/api/apidiscovery/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// StaleGroupVersionError encasulates failed GroupVersion marked "stale" +// in the returned AggregatedDiscovery format. +type StaleGroupVersionError struct { + gv schema.GroupVersion +} + +func (s StaleGroupVersionError) Error() string { + return fmt.Sprintf("stale GroupVersion discovery: %v", s.gv) +} + +// SplitGroupsAndResources transforms "aggregated" discovery top-level structure into +// the previous "unaggregated" discovery groups and resources. +func SplitGroupsAndResources(aggregatedGroups apidiscovery.APIGroupDiscoveryList) ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Aggregated group list will contain the entirety of discovery, including + // groups, versions, and resources. GroupVersions marked "stale" are failed. 
+ groups := []*metav1.APIGroup{} + failedGVs := map[schema.GroupVersion]error{} + resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{} + for _, aggGroup := range aggregatedGroups.Items { + group, resources, failed := convertAPIGroup(aggGroup) + groups = append(groups, group) + for gv, resourceList := range resources { + resourcesByGV[gv] = resourceList + } + for gv, err := range failed { + failedGVs[gv] = err + } + } + // Transform slice of groups to group list before returning. + groupList := &metav1.APIGroupList{} + groupList.Groups = make([]metav1.APIGroup, 0, len(groups)) + for _, group := range groups { + groupList.Groups = append(groupList.Groups, *group) + } + return groupList, resourcesByGV, failedGVs +} + +// convertAPIGroup tranforms an "aggregated" APIGroupDiscovery to an "legacy" APIGroup, +// also returning the map of APIResourceList for resources within GroupVersions. +func convertAPIGroup(g apidiscovery.APIGroupDiscovery) ( + *metav1.APIGroup, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Iterate through versions to convert to group and resources. + group := &metav1.APIGroup{} + gvResources := map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs := map[schema.GroupVersion]error{} + group.Name = g.ObjectMeta.Name + for _, v := range g.Versions { + gv := schema.GroupVersion{Group: g.Name, Version: v.Version} + if v.Freshness == apidiscovery.DiscoveryFreshnessStale { + failedGVs[gv] = StaleGroupVersionError{gv: gv} + continue + } + version := metav1.GroupVersionForDiscovery{} + version.GroupVersion = gv.String() + version.Version = v.Version + group.Versions = append(group.Versions, version) + // PreferredVersion is first non-stale Version + if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) { + group.PreferredVersion = version + } + resourceList := &metav1.APIResourceList{} + resourceList.GroupVersion = gv.String() + for _, r := range v.Resources { + resource, err := convertAPIResource(r) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, resource) + } + // Subresources field in new format get transformed into full APIResources. + // It is possible a partial result with an error was returned to be used + // as the parent resource for the subresource. + for _, subresource := range r.Subresources { + sr, err := convertAPISubresource(resource, subresource) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, sr) + } + } + } + gvResources[gv] = resourceList + } + return group, gvResources, failedGVs +} + +var emptyKind = metav1.GroupVersionKind{} + +// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. We are +// resilient to missing GVK, since this resource might be the parent resource +// for a subresource. If the parent is missing a GVK, it is not returned in +// discovery, and the subresource MUST have the GVK. 
+func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{ + Name: in.Resource, + SingularName: in.SingularResource, + Namespaced: in.Scope == apidiscovery.ScopeNamespace, + Verbs: in.Verbs, + ShortNames: in.ShortNames, + Categories: in.Categories, + } + var err error + if in.ResponseKind != nil && (*in.ResponseKind) != emptyKind { + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + } else { + err = fmt.Errorf("discovery resource %s missing GVK", in.Resource) + } + // Can return partial result with error, which can be the parent for a + // subresource. Do not add this result to the returned discovery resources. + return result, err +} + +// convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource. +func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{} + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) + } + result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) + result.SingularName = parent.SingularName + result.Namespaced = parent.Namespaced + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + result.Verbs = in.Verbs + return result, nil +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index baf878846..102ce49bf 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "mime" "net/http" "net/url" "sort" @@ -29,8 +30,9 @@ import ( //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" + apidiscovery "k8s.io/api/apidiscovery/v2beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -52,8 +54,22 @@ const ( // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. defaultTimeout = 32 * time.Second + + // defaultBurst is the default burst to be used with the discovery client's token bucket rate limiter + defaultBurst = 300 + + AcceptV1 = runtime.ContentTypeJSON + // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters + // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient + // to ordering when comparing returned values in "Content-Type" header). + AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" + // Prioritize aggregated discovery by placing first in the order of discovery accept types. + acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 ) +// Aggregated discovery content-type GVK. 
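
The Accept constants above drive the negotiation: the aggregated v2beta1 form is requested first, plain v1 JSON second. Purely as an illustration of the header value the client ends up sending (both constants are exported):

    // import (
    //     "fmt"
    //     "k8s.io/client-go/discovery"
    // )
    // Prints: application/json;g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList,application/json
    fmt.Println(discovery.AcceptV2Beta1 + "," + discovery.AcceptV1)
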
+var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} + // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. type DiscoveryInterface interface { @@ -63,6 +79,19 @@ type DiscoveryInterface interface { ServerVersionInterface OpenAPISchemaInterface OpenAPIV3SchemaInterface + // Returns copy of current discovery client that will only + // receive the legacy discovery format, or pointer to current + // discovery client if it does not support legacy-only discovery. + WithLegacy() DiscoveryInterface +} + +// AggregatedDiscoveryInterface extends DiscoveryInterface to include a method to possibly +// return discovery resources along with the discovery groups, which is what the newer +// aggregated discovery format does (APIGroupDiscoveryList). +type AggregatedDiscoveryInterface interface { + DiscoveryInterface + + GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. @@ -134,8 +163,12 @@ type DiscoveryClient struct { restClient restclient.Interface LegacyPrefix string + // Forces the client to request only "unaggregated" (legacy) discovery. + UseLegacyDiscovery bool } +var _ AggregatedDiscoveryInterface = &DiscoveryClient{} + // Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so // group would be "". func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { @@ -153,36 +186,187 @@ func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.API return } -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. -func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &metav1.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do(context.TODO()).Into(v) - apiGroup := metav1.APIGroup{} - if err == nil && len(v.Versions) != 0 { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err +// GroupsAndMaybeResources returns the discovery groups, and (if new aggregated +// discovery format) the resources keyed by group/version. Merges discovery groups +// and resources from /api and /apis (either aggregated or not). Legacy groups +// must be ordered first. The server will either return both endpoints (/api, /apis) +// as aggregated discovery format or legacy format. For safety, resources will only +// be returned if both endpoints returned resources. Returned "failedGVs" can be +// empty, but will only be nil in the case an error is returned. +func (d *DiscoveryClient) GroupsAndMaybeResources() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + // Legacy group ordered first (there is only one -- core/v1 group). Returned groups must + // be non-nil, but it could be empty. Returned resources, apiResources map could be nil. + groups, resources, failedGVs, err := d.downloadLegacy() + if err != nil { + return nil, nil, nil, err + } + // Discovery groups and (possibly) resources downloaded from /apis. 
+ apiGroups, apiResources, failedApisGVs, aerr := d.downloadAPIs() + if aerr != nil { + return nil, nil, nil, aerr + } + // Merge apis groups into the legacy groups. + for _, group := range apiGroups.Groups { + groups.Groups = append(groups.Groups, group) + } + // For safety, only return resources if both endpoints returned resources. + if resources != nil && apiResources != nil { + for gv, resourceList := range apiResources { + resources[gv] = resourceList + } + } else if resources != nil { + resources = nil } + // Merge failed GroupVersions from /api and /apis + for gv, err := range failedApisGVs { + failedGVs[gv] = err + } + return groups, resources, failedGVs, err +} - // Get the groupVersions exposed at /apis - apiGroupList = &metav1.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err +// downloadLegacy returns the discovery groups and possibly resources +// for the legacy v1 GVR at /api, or an error if one occurred. It is +// possible for the resource map to be nil if the server returned +// the unaggregated discovery. Returned "failedGVs" can be empty, but +// will only be nil in the case of a returned error. +func (d *DiscoveryClient) downloadLegacy() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + accept := acceptDiscoveryFormats + if d.UseLegacyDiscovery { + accept = AcceptV1 + } + var responseContentType string + body, err := d.restClient.Get(). + AbsPath("/api"). + SetHeader("Accept", accept). + Do(context.TODO()). + ContentType(&responseContentType). + Raw() + apiGroupList := &metav1.APIGroupList{} + failedGVs := map[schema.GroupVersion]error{} + if err != nil { + // Tolerate 404, since aggregated api servers can return it. + if errors.IsNotFound(err) { + // Return empty structures and no error. + emptyGVMap := map[schema.GroupVersion]*metav1.APIResourceList{} + return apiGroupList, emptyGVMap, failedGVs, nil + } else { + return nil, nil, nil, err + } } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &metav1.APIGroupList{} + + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else { + // Default is unaggregated discovery v1. + var v metav1.APIVersions + err = json.Unmarshal(body, &v) + if err != nil { + return nil, nil, nil, err + } + apiGroup := metav1.APIGroup{} + if len(v.Versions) != 0 { + apiGroup = apiVersionsToAPIGroup(&v) + } + apiGroupList.Groups = []metav1.APIGroup{apiGroup} } - // prepend the group retrieved from /api to the list if not empty - if len(v.Versions) != 0 { - apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) + return apiGroupList, resourcesByGV, failedGVs, nil +} + +// downloadAPIs returns the discovery groups and (if aggregated format) the +// discovery resources. The returned groups will always exist, but the +// resources map may be nil. 
Returned "failedGVs" can be empty, but will +// only be nil in the case of a returned error. +func (d *DiscoveryClient) downloadAPIs() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + accept := acceptDiscoveryFormats + if d.UseLegacyDiscovery { + accept = AcceptV1 + } + var responseContentType string + body, err := d.restClient.Get(). + AbsPath("/apis"). + SetHeader("Accept", accept). + Do(context.TODO()). + ContentType(&responseContentType). + Raw() + if err != nil { + return nil, nil, nil, err + } + + apiGroupList := &metav1.APIGroupList{} + failedGVs := map[schema.GroupVersion]error{} + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else { + // Default is unaggregated discovery v1. + err = json.Unmarshal(body, apiGroupList) + if err != nil { + return nil, nil, nil, err + } + } + + return apiGroupList, resourcesByGV, failedGVs, nil +} + +// ContentTypeIsGVK checks of the content-type string is both +// "application/json" and matches the provided GVK. An error +// is returned if the content type string is malformed. +// NOTE: This function is resilient to the ordering of the +// content-type parameters, as well as parameters added by +// intermediaries such as proxies or gateways. Examples: +// +// ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json", any GVK) = (false, nil) +// ("application/json; charset=UTF-8", any GVK) = (false, nil) +// ("malformed content type string", any GVK) = (false, error) +func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) { + base, params, err := mime.ParseMediaType(contentType) + if err != nil { + return false, err } - return apiGroupList, nil + gvkMatch := runtime.ContentTypeJSON == base && + params["g"] == gvk.Group && + params["v"] == gvk.Version && + params["as"] == gvk.Kind + return gvkMatch, nil +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + groups, _, _, err := d.GroupsAndMaybeResources() + if err != nil { + return nil, err + } + return groups, nil } // ServerResourcesForGroupVersion returns the supported resources for a group and version. @@ -201,8 +385,10 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r } err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources) if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. 
- if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + // Tolerate core/v1 not found response by returning empty resource list; + // this probably should not happen. But we should verify all callers are + // not depending on this toleration before removal. + if groupVersion == "v1" && errors.IsNotFound(err) { return resources, nil } return nil, err @@ -241,7 +427,23 @@ func IsGroupDiscoveryFailedError(err error) bool { } func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := d.ServerGroups() + var sgs *metav1.APIGroupList + var resources []*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + var err error + + // If the passed discovery object implements the wider AggregatedDiscoveryInterface, + // then attempt to retrieve aggregated discovery with both groups and the resources. + if ad, ok := d.(AggregatedDiscoveryInterface); ok { + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + sgs, resourcesByGV, failedGVs, err = ad.GroupsAndMaybeResources() + for _, resourceList := range resourcesByGV { + resources = append(resources, resourceList) + } + } else { + sgs, err = d.ServerGroups() + } + if sgs == nil { return nil, nil, err } @@ -249,6 +451,16 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta for i := range sgs.Groups { resultGroups = append(resultGroups, &sgs.Groups[i]) } + // resources is non-nil if aggregated discovery succeeded. + if resources != nil { + // Any stale Group/Versions returned by aggregated discovery + // must be surfaced to the caller as failed Group/Versions. + var ferr error + if len(failedGVs) > 0 { + ferr = &ErrGroupDiscoveryFailed{Groups: failedGVs} + } + return resultGroups, resources, ferr + } groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) @@ -272,12 +484,27 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta // ServerPreferredResources uses the provided discovery interface to look up preferred resources func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - serverGroupList, err := d.ServerGroups() + var serverGroupList *metav1.APIGroupList + var failedGroups map[schema.GroupVersion]error + var groupVersionResources map[schema.GroupVersion]*metav1.APIResourceList + var err error + + // If the passed discovery object implements the wider AggregatedDiscoveryInterface, + // then it is attempt to retrieve both the groups and the resources. "failedGroups" + // are Group/Versions returned as stale in AggregatedDiscovery format. + ad, ok := d.(AggregatedDiscoveryInterface) + if ok { + serverGroupList, groupVersionResources, failedGroups, err = ad.GroupsAndMaybeResources() + } else { + serverGroupList, err = d.ServerGroups() + } if err != nil { return nil, err } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) + // Non-aggregated discovery must fetch resources from Groups. + if groupVersionResources == nil { + groupVersionResources, failedGroups = fetchGroupVersionResources(d, serverGroupList) + } result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource @@ -433,6 +660,14 @@ func (d *DiscoveryClient) OpenAPIV3() openapi.Client { return openapi.NewClient(d.restClient) } +// WithLegacy returns copy of current discovery client that will only +// receive the legacy discovery format. 
+func (d *DiscoveryClient) WithLegacy() DiscoveryInterface { + client := *d + client.UseLegacyDiscovery = true + return &client +} + // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var result []*metav1.APIResourceList @@ -456,12 +691,13 @@ func setDiscoveryDefaults(config *restclient.Config) error { if config.Timeout == 0 { config.Timeout = defaultTimeout } - if config.Burst == 0 && config.QPS < 100 { + // if a burst limit is not already configured + if config.Burst == 0 { // discovery is expected to be bursty, increase the default burst // to accommodate looking up resource info for many API groups. // matches burst set by ConfigFlags#ToDiscoveryClient(). // see https://issue.k8s.io/86149 - config.Burst = 100 + config.Burst = defaultBurst } codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) @@ -496,7 +732,7 @@ func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http return nil, err } client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient) - return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err + return &DiscoveryClient{restClient: client, LegacyPrefix: "/api", UseLegacyDiscovery: false}, err } // NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If @@ -512,7 +748,7 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { // NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. 
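
Taken together, a caller that wants single-round-trip aggregated discovery (with transparent fallback on servers that only speak the legacy format) could look roughly like this; restConfig is assumed to be an existing *rest.Config:

    // import (
    //     "log"
    //     "k8s.io/client-go/discovery"
    // )
    dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
    if err != nil {
        return err
    }
    groups, resourcesByGV, failedGVs, err := dc.GroupsAndMaybeResources()
    if err != nil {
        return err
    }
    for gv, gvErr := range failedGVs {
        log.Printf("skipping stale group/version %s: %v", gv, gvErr) // e.g. StaleGroupVersionError
    }
    if resourcesByGV == nil {
        // The server answered with unaggregated discovery; resources still need per-group fetches.
    }
    _ = groups
    // Opting out of aggregated discovery entirely:
    legacyOnly := dc.WithLegacy()
    _ = legacyOnly
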
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { - return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} + return &DiscoveryClient{restClient: c, LegacyPrefix: "/api", UseLegacyDiscovery: false} } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go index b08067c34..a310b63e5 100644 --- a/vendor/k8s.io/client-go/dynamic/interface.go +++ b/vendor/k8s.io/client-go/dynamic/interface.go @@ -40,6 +40,8 @@ type ResourceInterface interface { List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) + Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) + ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) } type NamespaceableResourceInterface interface { diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 87594bf2e..4b5485953 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -31,11 +31,11 @@ import ( "k8s.io/client-go/rest" ) -type dynamicClient struct { - client *rest.RESTClient +type DynamicClient struct { + client rest.Interface } -var _ Interface = &dynamicClient{} +var _ Interface = &DynamicClient{} // ConfigFor returns a copy of the provided config with the // appropriate dynamic client defaults set. @@ -50,9 +50,14 @@ func ConfigFor(inConfig *rest.Config) *rest.Config { return config } -// NewForConfigOrDie creates a new Interface for the given config and +// New creates a new DynamicClient for the given RESTClient. +func New(c rest.Interface) *DynamicClient { + return &DynamicClient{client: c} +} + +// NewForConfigOrDie creates a new DynamicClient for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) Interface { +func NewForConfigOrDie(c *rest.Config) *DynamicClient { ret, err := NewForConfig(c) if err != nil { panic(err) @@ -63,7 +68,7 @@ func NewForConfigOrDie(c *rest.Config) Interface { // NewForConfig creates a new dynamic client or returns an error. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(inConfig *rest.Config) (Interface, error) { +func NewForConfig(inConfig *rest.Config) (*DynamicClient, error) { config := ConfigFor(inConfig) httpClient, err := rest.HTTPClientFor(config) @@ -75,7 +80,7 @@ func NewForConfig(inConfig *rest.Config) (Interface, error) { // NewForConfigAndClient creates a new dynamic client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { +func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (*DynamicClient, error) { config := ConfigFor(inConfig) // for serializing the options config.GroupVersion = &schema.GroupVersion{} @@ -85,16 +90,16 @@ func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, er if err != nil { return nil, err } - return &dynamicClient{client: restClient}, nil + return &DynamicClient{client: restClient}, nil } type dynamicResourceClient struct { - client *dynamicClient + client *DynamicClient namespace string resource schema.GroupVersionResource } -func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface { +func (c *DynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface { return &dynamicResourceClient{client: c, resource: resource} } @@ -120,6 +125,9 @@ func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Un return nil, fmt.Errorf("name is required") } } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } result := c.client.client. Post(). @@ -152,6 +160,9 @@ func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Un if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -188,7 +199,9 @@ func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructu if len(name) == 0 { return nil, fmt.Errorf("name is required") } - + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -220,6 +233,9 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me if len(name) == 0 { return fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return err + } deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) if err != nil { return err @@ -235,6 +251,10 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me } func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return err + } + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) if err != nil { return err @@ -254,6 +274,9 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) if err := result.Error(); err != nil { return nil, err @@ -270,6 +293,9 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav } func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, 
error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) if err := result.Error(); err != nil { return nil, err @@ -295,6 +321,9 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { opts.Watch = true + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } return c.client.client.Get().AbsPath(c.makeURLSegments("")...). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Watch(ctx) @@ -304,6 +333,9 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } result := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). @@ -324,6 +356,65 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types return uncastObj.(*unstructured.Unstructured), nil } +func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } + outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + managedFields := accessor.GetManagedFields() + if len(managedFields) > 0 { + return nil, fmt.Errorf(`cannot apply an object with managed fields already set. + Use the client-go/applyconfigurations "UnstructructuredExtractor" to obtain the unstructured ApplyConfiguration for the given field manager that you can use/modify here to apply`) + } + patchOpts := opts.ToPatchOptions() + + result := c.client.client. + Patch(types.ApplyPatchType). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(outBytes). + SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + retBytes, err := result.Raw() + if err != nil { + return nil, err + } + uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) + if err != nil { + return nil, err + } + return uncastObj.(*unstructured.Unstructured), nil +} +func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { + return c.Apply(ctx, name, obj, opts, "status") +} + +func validateNamespaceWithOptionalName(namespace string, name ...string) error { + if msgs := rest.IsValidPathSegmentName(namespace); len(msgs) != 0 { + return fmt.Errorf("invalid namespace %q: %v", namespace, msgs) + } + if len(name) > 1 { + panic("Invalid number of names") + } else if len(name) == 1 { + if msgs := rest.IsValidPathSegmentName(name[0]); len(msgs) != 0 { + return fmt.Errorf("invalid resource name %q: %v", name[0], msgs) + } + } + return nil +} + func (c *dynamicResourceClient) makeURLSegments(name string) []string { url := []string{} if len(c.resource.Group) == 0 { diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index 14a6db438..7cd8d7276 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -20,6 +20,7 @@ package admissionregistration import ( v1 "k8s.io/client-go/informers/admissionregistration/v1" + v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go new file mode 100644 index 000000000..738063ee7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
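
With Apply and ApplyStatus now on ResourceInterface, server-side apply also works through the dynamic client. A rough sketch, where dyn is the *dynamic.DynamicClient returned by dynamic.NewForConfig and the ConfigMap is just an example object:

    // import (
    //     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    //     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    //     "k8s.io/apimachinery/pkg/runtime/schema"
    // )
    gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
    cm := &unstructured.Unstructured{Object: map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "ConfigMap",
        "metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
        "data":       map[string]interface{}{"key": "value"},
    }}
    // metadata.managedFields must be empty, and the name argument must match metadata.name.
    out, err := dyn.Resource(gvr).Namespace("default").
        Apply(ctx, "demo", cm, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
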
+ +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer + // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer { + return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer { + return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..01b8a4ab8 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicies. 
+type ValidatingAdmissionPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ValidatingAdmissionPolicyLister +} + +type validatingAdmissionPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyInformer) Lister() v1alpha1.ValidatingAdmissionPolicyLister { + return v1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..bd531512b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicyBindings. +type ValidatingAdmissionPolicyBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister +} + +type validatingAdmissionPolicyBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister { + return v1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go index d1e2b61be..815960df5 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go @@ -26,6 +26,10 @@ import ( type Interface interface { // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer + // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer } @@ -46,6 +50,16 @@ func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationIn return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer { + return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer { + return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. 
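
The new ValidatingAdmissionPolicy informers (v1alpha1 above, v1beta1 below) are reached through the shared informer factory like any other group. A sketch, assuming a kubernetes.Interface named client and the factory's Admissionregistration() accessor:

    // import (
    //     "time"
    //     "k8s.io/apimachinery/pkg/labels"
    //     "k8s.io/client-go/informers"
    // )
    factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    vapLister := factory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Lister()
    stopCh := make(chan struct{})
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)
    policies, err := vapLister.List(labels.Everything()) // all ValidatingAdmissionPolicies in the cache
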
func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..d0e9cd64c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicies. +type ValidatingAdmissionPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ValidatingAdmissionPolicyLister +} + +type validatingAdmissionPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyInformer) Lister() v1beta1.ValidatingAdmissionPolicyLister { + return v1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..7641e9940 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicyBindings. 
+type ValidatingAdmissionPolicyBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ValidatingAdmissionPolicyBindingLister +} + +type validatingAdmissionPolicyBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1beta1.ValidatingAdmissionPolicyBindingLister { + return v1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/interface.go b/vendor/k8s.io/client-go/informers/certificates/interface.go index e38d01177..39a4e2911 100644 --- a/vendor/k8s.io/client-go/informers/certificates/interface.go +++ b/vendor/k8s.io/client-go/informers/certificates/interface.go @@ -20,6 +20,7 @@ package certificates import ( v1 "k8s.io/client-go/informers/certificates/v1" + v1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1" v1beta1 "k8s.io/client-go/informers/certificates/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. 
V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..e8b341587 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterTrustBundleInformer provides access to a shared informer and lister for +// ClusterTrustBundles. +type ClusterTrustBundleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterTrustBundleLister +} + +type clusterTrustBundleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterTrustBundleInformer constructs a new informer for ClusterTrustBundle type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterTrustBundleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterTrustBundleInformer constructs a new informer for ClusterTrustBundle type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().ClusterTrustBundles().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().ClusterTrustBundles().Watch(context.TODO(), options) + }, + }, + &certificatesv1alpha1.ClusterTrustBundle{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterTrustBundleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterTrustBundleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterTrustBundleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer) +} + +func (f *clusterTrustBundleInformer) Lister() v1alpha1.ClusterTrustBundleLister { + return v1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go new file mode 100644 index 000000000..40ce8f42d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterTrustBundles returns a ClusterTrustBundleInformer. + ClusterTrustBundles() ClusterTrustBundleInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterTrustBundles returns a ClusterTrustBundleInformer. 
+func (v *version) ClusterTrustBundles() ClusterTrustBundleInformer { + return &clusterTrustBundleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/doc.go b/vendor/k8s.io/client-go/informers/doc.go new file mode 100644 index 000000000..231bffb69 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package informers provides generated informers for Kubernetes APIs. +package informers diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index 6f0bea7e8..600741e3a 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -32,8 +32,6 @@ type Interface interface { Ingresses() IngressInformer // NetworkPolicies returns a NetworkPolicyInformer. NetworkPolicies() NetworkPolicyInformer - // PodSecurityPolicies returns a PodSecurityPolicyInformer. - PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. ReplicaSets() ReplicaSetInformer } @@ -69,11 +67,6 @@ func (v *version) NetworkPolicies() NetworkPolicyInformer { return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// PodSecurityPolicies returns a PodSecurityPolicyInformer. -func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { - return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // ReplicaSets returns a ReplicaSetInformer. func (v *version) ReplicaSets() ReplicaSetInformer { return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index c6e102699..7dd0ae635 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -43,6 +43,7 @@ import ( node "k8s.io/client-go/informers/node" policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" + resource "k8s.io/client-go/informers/resource" scheduling "k8s.io/client-go/informers/scheduling" storage "k8s.io/client-go/informers/storage" kubernetes "k8s.io/client-go/kubernetes" @@ -64,6 +65,11 @@ type sharedInformerFactory struct { // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. 
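The wg and shuttingDown fields added above are wired up in the following hunk, which pairs the existing Start and WaitForCacheSync with a new Shutdown method on the factory. Below is a minimal consumption sketch (not taken from this patch) that combines that lifecycle with one of the newly generated admissionregistration/v1beta1 informers; the package and helper names, resync period, and label selector are illustrative assumptions, and the clientset is assumed to be constructed elsewhere:

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// runPolicyInformer wires a shared informer factory around the generated
// admissionregistration/v1beta1 ValidatingAdmissionPolicy informer, waits for
// its cache, reads from the lister, and shuts the factory down cleanly.
func runPolicyInformer(parent context.Context, client kubernetes.Interface) error {
	factory := informers.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute, // illustrative resync period
		informers.WithTweakListOptions(func(o *metav1.ListOptions) {
			o.LabelSelector = "example.com/managed=true" // illustrative selector
		}),
	)
	// Deferred calls run last-in-first-out: cancel closes the stop channel
	// first, which lets Shutdown's wait on the informer goroutines return.
	defer factory.Shutdown()
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	// Touching the informer before Start registers it with the factory.
	policies := factory.Admissionregistration().V1beta1().ValidatingAdmissionPolicies()
	policies.Informer()

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			return fmt.Errorf("cache failed to sync for %v", typ)
		}
	}

	cached, err := policies.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d ValidatingAdmissionPolicies\n", len(cached))
	return nil
}

As the implementation in the next hunk notes, Shutdown returns immediately when nothing was started, so the deferred call is safe even on early error paths.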
@@ -124,20 +130,39 @@ func NewSharedInformerFactoryWithOptions(client kubernetes.Interface, defaultRes return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -159,7 +184,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -184,11 +209,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed.
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Admissionregistration() admissionregistration.Interface Internal() apiserverinternal.Interface Apps() apps.Interface @@ -205,6 +277,7 @@ type SharedInformerFactory interface { Node() node.Interface Policy() policy.Interface Rbac() rbac.Interface + Resource() resource.Interface Scheduling() scheduling.Interface Storage() storage.Interface } @@ -273,6 +346,10 @@ func (f *sharedInformerFactory) Rbac() rbac.Interface { return rbac.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Resource() resource.Interface { + return resource.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Scheduling() scheduling.Interface { return scheduling.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index 828a7bc8b..1d3ca09ef 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -22,6 +22,7 @@ import ( v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" + v1beta3 "k8s.io/client-go/informers/flowcontrol/v1beta3" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -33,6 +34,8 @@ type Interface interface { V1beta1() v1beta1.Interface // V1beta2 provides access to shared informers for resources in V1beta2. V1beta2() v1beta2.Interface + // V1beta3 provides access to shared informers for resources in V1beta3. + V1beta3() v1beta3.Interface } type group struct { @@ -60,3 +63,8 @@ func (g *group) V1beta1() v1beta1.Interface { func (g *group) V1beta2() v1beta2.Interface { return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta3 returns a new v1beta3.Interface. +func (g *group) V1beta3() v1beta3.Interface { + return v1beta3.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go similarity index 51% rename from vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go index 11be2751c..56d8c8b11 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go @@ -16,74 +16,74 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. 
-package v1beta1 +package v1beta3 import ( "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" cache "k8s.io/client-go/tools/cache" ) -// PodSecurityPolicyInformer provides access to a shared informer and lister for -// PodSecurityPolicies. -type PodSecurityPolicyInformer interface { +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. +type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PodSecurityPolicyLister + Lister() v1beta3.FlowSchemaLister } -type podSecurityPolicyInformer struct { +type flowSchemaInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().List(context.TODO(), options) + return client.FlowcontrolV1beta3().FlowSchemas().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) + return client.FlowcontrolV1beta3().FlowSchemas().Watch(context.TODO(), options) }, }, - &extensionsv1beta1.PodSecurityPolicy{}, + &flowcontrolv1beta3.FlowSchema{}, resyncPeriod, indexers, ) } -func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.PodSecurityPolicy{}, f.defaultInformer) +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta3.FlowSchema{}, f.defaultInformer) } -func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { - return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() v1beta3.FlowSchemaLister { + return v1beta3.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go new file mode 100644 index 000000000..54c5414a2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta3 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. 
+ FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..71f8d5b07 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + time "time" + + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. +type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta3.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta3().PriorityLevelConfigurations().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta3().PriorityLevelConfigurations().Watch(context.TODO(), options) + }, + }, + &flowcontrolv1beta3.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1beta3.PriorityLevelConfigurationLister { + return v1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 4c2e53c25..5495239b2 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -22,6 +22,7 @@ import ( "fmt" v1 "k8s.io/api/admissionregistration/v1" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" @@ -34,6 +35,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -43,10 +45,12 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 
"k8s.io/api/node/v1alpha1" @@ -56,6 +60,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + v1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -98,9 +103,19 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicyBindings().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil @@ -166,6 +181,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1().CertificateSigningRequests().Informer()}, nil + // Group=certificates.k8s.io, Version=v1alpha1 + case certificatesv1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().ClusterTrustBundles().Informer()}, nil + // Group=certificates.k8s.io, Version=v1beta1 case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil @@ -237,15 +256,13 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil - case 
extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("flowschemas"): + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 @@ -260,6 +277,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case flowcontrolv1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta3 + case v1beta3.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().FlowSchemas().Informer()}, nil + case v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().PriorityLevelConfigurations().Informer()}, nil + // Group=internal.apiserver.k8s.io, Version=v1alpha1 case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil @@ -272,6 +295,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil + case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil + // Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil @@ -330,6 +359,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha2 + case 
v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil + // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go index 4a028d5d1..1c775c465 100644 --- a/vendor/k8s.io/client-go/informers/networking/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/interface.go @@ -21,6 +21,7 @@ package networking import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1 "k8s.io/client-go/informers/networking/v1" + v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1" v1beta1 "k8s.io/client-go/informers/networking/v1beta1" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go new file mode 100644 index 000000000..cefd0f8a1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
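The generic.go additions above register the new resource.k8s.io/v1alpha2, networking.k8s.io/v1alpha1, flowcontrol/v1beta3, and certificates/v1alpha1 kinds with ForResource, so they can be reached without the typed accessors. A short, illustrative sketch of that untyped path, assuming a factory that has already been started and synced; the package name, function name, and GVR literal are assumptions, not part of the patch:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
)

// listResourceClaimsGenerically resolves the newly registered
// resource.k8s.io/v1alpha2 "resourceclaims" mapping via ForResource and reads
// cached objects through the untyped GenericLister.
func listResourceClaimsGenerically(factory informers.SharedInformerFactory) error {
	gvr := schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha2", Resource: "resourceclaims"}
	generic, err := factory.ForResource(gvr)
	if err != nil {
		return err // ForResource errors for GVRs without a generated mapping
	}
	// The factory still has to be started and synced (Start / WaitForCacheSync)
	// before this cache holds anything.
	objs, err := generic.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("cached ResourceClaims: %d\n", len(objs))
	return nil
}

The typed route added by the same bump is factory.Resource().V1alpha2().ResourceClaims(), whose lister yields *v1alpha2.ResourceClaim values rather than runtime.Object.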
+ +package v1alpha1 + +import ( + "context" + time "time" + + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterCIDRInformer provides access to a shared informer and lister for +// ClusterCIDRs. +type ClusterCIDRInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterCIDRLister +} + +type clusterCIDRInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) + }, + }, + &networkingv1alpha1.ClusterCIDR{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) +} + +func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { + return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go new file mode 100644 index 000000000..07e7d208c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterCIDRs returns a ClusterCIDRInformer. + ClusterCIDRs() ClusterCIDRInformer + // IPAddresses returns a IPAddressInformer. + IPAddresses() IPAddressInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterCIDRs returns a ClusterCIDRInformer. +func (v *version) ClusterCIDRs() ClusterCIDRInformer { + return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// IPAddresses returns a IPAddressInformer. +func (v *version) IPAddresses() IPAddressInformer { + return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..a1083dbf0 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// IPAddressInformer provides access to a shared informer and lister for +// IPAddresses. +type IPAddressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.IPAddressLister +} + +type iPAddressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIPAddressInformer constructs a new informer for IPAddress type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().IPAddresses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().IPAddresses().Watch(context.TODO(), options) + }, + }, + &networkingv1alpha1.IPAddress{}, + resyncPeriod, + indexers, + ) +} + +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.IPAddress{}, f.defaultInformer) +} + +func (f *iPAddressInformer) Lister() v1alpha1.IPAddressLister { + return v1alpha1.NewIPAddressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/interface.go b/vendor/k8s.io/client-go/informers/resource/interface.go new file mode 100644 index 000000000..3fcce8ae9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package resource + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha2 "k8s.io/client-go/informers/resource/v1alpha2" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go new file mode 100644 index 000000000..23f817c62 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PodSchedulingContexts returns a PodSchedulingContextInformer. + PodSchedulingContexts() PodSchedulingContextInformer + // ResourceClaims returns a ResourceClaimInformer. + ResourceClaims() ResourceClaimInformer + // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. + ResourceClaimTemplates() ResourceClaimTemplateInformer + // ResourceClasses returns a ResourceClassInformer. + ResourceClasses() ResourceClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PodSchedulingContexts returns a PodSchedulingContextInformer. +func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { + return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaims returns a ResourceClaimInformer. +func (v *version) ResourceClaims() ResourceClaimInformer { + return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. +func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { + return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClasses returns a ResourceClassInformer. 
+func (v *version) ResourceClasses() ResourceClassInformer { + return &resourceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..b4aabb376 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + time "time" + + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + cache "k8s.io/client-go/tools/cache" +) + +// PodSchedulingContextInformer provides access to a shared informer and lister for +// PodSchedulingContexts. +type PodSchedulingContextInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.PodSchedulingContextLister +} + +type podSchedulingContextInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
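[Editor's note] The generated comments here repeat the same guidance: obtain these informers through a shared informer factory rather than the NewFiltered* constructors, so that one watch and one cache serve all consumers. A minimal sketch of that factory path for the new resource.k8s.io/v1alpha2 informers added in this bump follows; it assumes an already-constructed kubernetes.Interface and a 30s resync period, and the function and variable names are illustrative, not part of this patch.

	package example

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/labels"
		"k8s.io/client-go/informers"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/cache"
	)

	// listClaimsViaFactory uses the shared informer factory, as the generated
	// comments recommend, instead of building an independent informer.
	func listClaimsViaFactory(client kubernetes.Interface, stopCh <-chan struct{}) error {
		factory := informers.NewSharedInformerFactory(client, 30*time.Second)
		claims := factory.Resource().V1alpha2().ResourceClaims()
		informer := claims.Informer() // register before Start so the factory runs it
		factory.Start(stopCh)
		if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
			return fmt.Errorf("ResourceClaim cache never synced")
		}
		// The lister serves reads from the shared in-memory cache.
		all, err := claims.Lister().ResourceClaims("default").List(labels.Everything())
		if err != nil {
			return err
		}
		fmt.Printf("found %d ResourceClaims\n", len(all))
		return nil
	}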
+func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().PodSchedulingContexts(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().PodSchedulingContexts(namespace).Watch(context.TODO(), options) + }, + }, + &resourcev1alpha2.PodSchedulingContext{}, + resyncPeriod, + indexers, + ) +} + +func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha2.PodSchedulingContext{}, f.defaultInformer) +} + +func (f *podSchedulingContextInformer) Lister() v1alpha2.PodSchedulingContextLister { + return v1alpha2.NewPodSchedulingContextLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go new file mode 100644 index 000000000..3af936891 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + time "time" + + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimInformer provides access to a shared informer and lister for +// ResourceClaims. +type ResourceClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ResourceClaimLister +} + +type resourceClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClaims(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClaims(namespace).Watch(context.TODO(), options) + }, + }, + &resourcev1alpha2.ResourceClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha2.ResourceClaim{}, f.defaultInformer) +} + +func (f *resourceClaimInformer) Lister() v1alpha2.ResourceClaimLister { + return v1alpha2.NewResourceClaimLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go new file mode 100644 index 000000000..13f4ad835 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + time "time" + + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateInformer provides access to a shared informer and lister for +// ResourceClaimTemplates. 
+type ResourceClaimTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ResourceClaimTemplateLister +} + +type resourceClaimTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + }, + }, + &resourcev1alpha2.ResourceClaimTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimTemplate{}, f.defaultInformer) +} + +func (f *resourceClaimTemplateInformer) Lister() v1alpha2.ResourceClaimTemplateLister { + return v1alpha2.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go new file mode 100644 index 000000000..cb76d78fe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + time "time" + + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClassInformer provides access to a shared informer and lister for +// ResourceClasses. +type ResourceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ResourceClassLister +} + +type resourceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewResourceClassInformer constructs a new informer for ResourceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClassInformer constructs a new informer for ResourceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha2().ResourceClasses().Watch(context.TODO(), options) + }, + }, + &resourcev1alpha2.ResourceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha2.ResourceClass{}, f.defaultInformer) +} + +func (f *resourceClassInformer) Lister() v1alpha2.ResourceClassLister { + return v1alpha2.NewResourceClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index e46c0537f..6345f2fb6 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -24,12 +24,14 @@ import ( discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" admissionregistrationv1beta1 
"k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" @@ -40,6 +42,7 @@ import ( batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" @@ -52,7 +55,9 @@ import ( flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" @@ -62,6 +67,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -75,12 +81,14 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface + AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface + AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface @@ -92,6 +100,7 @@ type Interface interface { BatchV1beta1() batchv1beta1.BatchV1beta1Interface CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CertificatesV1alpha1() 
certificatesv1alpha1.CertificatesV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -103,7 +112,9 @@ type Interface interface { FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface + FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface NetworkingV1() networkingv1.NetworkingV1Interface + NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface @@ -113,6 +124,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -121,55 +133,60 @@ type Interface interface { StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient - admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client - appsV1 *appsv1.AppsV1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2 *autoscalingv2.AutoscalingV2Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - certificatesV1 *certificatesv1.CertificatesV1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coordinationV1 *coordinationv1.CoordinationV1Client - coreV1 *corev1.CoreV1Client - discoveryV1 *discoveryv1.DiscoveryV1Client - discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client - eventsV1 *eventsv1.EventsV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client - flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client - flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client - networkingV1 *networkingv1.NetworkingV1Client - networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client - nodeV1 *nodev1.NodeV1Client - nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client - nodeV1beta1 *nodev1beta1.NodeV1beta1Client - policyV1 *policyv1.PolicyV1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 
*rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - schedulingV1 *schedulingv1.SchedulingV1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client + admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client + appsV1 *appsv1.AppsV1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2 *autoscalingv2.AutoscalingV2Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + certificatesV1 *certificatesv1.CertificatesV1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coordinationV1 *coordinationv1.CoordinationV1Client + coreV1 *corev1.CoreV1Client + discoveryV1 *discoveryv1.DiscoveryV1Client + discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client + eventsV1 *eventsv1.EventsV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client + flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client + flowcontrolV1beta3 *flowcontrolv1beta3.FlowcontrolV1beta3Client + networkingV1 *networkingv1.NetworkingV1Client + networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client + networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1 *nodev1.NodeV1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client + policyV1 *policyv1.PolicyV1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + schedulingV1 *schedulingv1.SchedulingV1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } // AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client @@ -177,6 +194,11 @@ func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.Admissionr return c.admissionregistrationV1 } +// 
AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client +func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { + return c.admissionregistrationV1alpha1 +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return c.admissionregistrationV1beta1 @@ -207,6 +229,11 @@ func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interfac return c.authenticationV1 } +// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client +func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -262,6 +289,11 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -317,11 +349,21 @@ func (c *Clientset) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2In return c.flowcontrolV1beta2 } +// FlowcontrolV1beta3 retrieves the FlowcontrolV1beta3Client +func (c *Clientset) FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface { + return c.flowcontrolV1beta3 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } +// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client +func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { + return c.networkingV1alpha1 +} + // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return c.networkingV1beta1 @@ -367,6 +409,11 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return c.resourceV1alpha2 +} + // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { return c.schedulingV1alpha1 @@ -445,6 +492,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -469,6 +520,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if 
err != nil { + return nil, err + } cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -513,6 +568,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -557,10 +616,18 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.flowcontrolV1beta3, err = flowcontrolv1beta3.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } + cs.networkingV1alpha1, err = networkingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -597,6 +664,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -643,12 +714,14 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.New(c) + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.internalV1alpha1 = internalv1alpha1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.authenticationV1 = authenticationv1.New(c) + cs.authenticationV1alpha1 = authenticationv1alpha1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) cs.authorizationV1beta1 = authorizationv1beta1.New(c) @@ -660,6 +733,7 @@ func New(c rest.Interface) *Clientset { cs.batchV1beta1 = batchv1beta1.New(c) cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -671,7 +745,9 @@ func New(c rest.Interface) *Clientset { cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) + cs.flowcontrolV1beta3 = flowcontrolv1beta3.New(c) cs.networkingV1 = networkingv1.New(c) + cs.networkingV1alpha1 = networkingv1alpha1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) @@ -681,6 +757,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) + cs.resourceV1alpha2 = resourcev1alpha2.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = 
schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index b272334ad..9cef4242f 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index b41466151..64d3ce2a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -20,12 +20,14 @@ package scheme import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" @@ -36,6 +38,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -48,7 +51,9 @@ import ( flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -58,6 +63,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -76,12 +82,14 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, + admissionregistrationv1alpha1.AddToScheme, admissionregistrationv1beta1.AddToScheme, internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, authenticationv1.AddToScheme, + authenticationv1alpha1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, authorizationv1beta1.AddToScheme, @@ -93,6 +101,7 @@ var localSchemeBuilder = 
runtime.SchemeBuilder{ batchv1beta1.AddToScheme, certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, + certificatesv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -104,7 +113,9 @@ var localSchemeBuilder = runtime.SchemeBuilder{ flowcontrolv1alpha1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, + flowcontrolv1beta3.AddToScheme, networkingv1.AddToScheme, + networkingv1alpha1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, nodev1alpha1.AddToScheme, @@ -114,6 +125,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, @@ -125,14 +137,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go new file mode 100644 index 000000000..f6102d25a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1alpha1Interface interface { + RESTClient() rest.Interface + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter +} + +// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. 
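[Editor's note] The Clientset changes above add accessors for several new API groups (AdmissionregistrationV1alpha1, AuthenticationV1alpha1, CertificatesV1alpha1, FlowcontrolV1beta3, NetworkingV1alpha1, ResourceV1alpha2), and each typed client below is wired up through the same NewForConfig / NewForConfigAndClient pattern. A hedged sketch of how a caller reaches one of the new groups; it assumes in-cluster configuration and a cluster that actually serves the alpha ValidatingAdmissionPolicy API, and is not part of this patch.

	package main

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
	)

	func main() {
		cfg, err := rest.InClusterConfig() // swap in clientcmd loading for out-of-cluster use
		if err != nil {
			panic(err)
		}
		clientset, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			panic(err)
		}
		// The new group accessors hang directly off the Clientset.
		policies, err := clientset.AdmissionregistrationV1alpha1().
			ValidatingAdmissionPolicies().
			List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("cluster has %d ValidatingAdmissionPolicies\n", len(policies.Items))
	}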
+type AdmissionregistrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + +// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { + return &AdmissionregistrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..94562da59 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ValidatingAdmissionPolicyExpansion interface{} + +type ValidatingAdmissionPolicyBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..1d994b5ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. 
+type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + client rest.Interface +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. +func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Get(). + Resource("validatingadmissionpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. +func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ValidatingAdmissionPolicyList{} + err = c.client.Get(). + Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
+func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Post(). + Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. +func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingadmissionpolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingadmissionpolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicy. 
+func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(pt). + Resource("validatingadmissionpolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. +func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..39823ca82 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. +type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + client rest.Interface +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. +func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1alpha1.ValidatingAdmissionPolicyBinding{} + err = c.client.Get(). + Resource("validatingadmissionpolicybindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. 
+func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ValidatingAdmissionPolicyBindingList{} + err = c.client.Get(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. +func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1alpha1.ValidatingAdmissionPolicyBinding{} + err = c.client.Post(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicyBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1alpha1.ValidatingAdmissionPolicyBinding{} + err = c.client.Put(). + Resource("validatingadmissionpolicybindings"). + Name(validatingAdmissionPolicyBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicyBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. +func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingadmissionpolicybindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. 
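[Editor's aside, not part of the vendored patch: a sketch of consuming the watch.Interface returned by the generated Watch method above. The 60-second timeout and in-cluster credentials are assumptions; the sketch only shows how ListOptions.TimeoutSeconds feeds the request timeout and how events are read from ResultChan.]

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// TimeoutSeconds is copied into the request timeout by the generated Watch method,
	// so the stream ends (and ResultChan closes) after roughly this long.
	timeout := int64(60)
	w, err := cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().
		Watch(context.TODO(), metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Drain events until the server closes the stream.
	for ev := range w.ResultChan() {
		fmt.Printf("%s: %T\n", ev.Type, ev.Object)
	}
}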
+func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1alpha1.ValidatingAdmissionPolicyBinding{} + err = c.client.Patch(pt). + Resource("validatingadmissionpolicybindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. +func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + if validatingAdmissionPolicyBinding == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicyBinding) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicyBinding.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") + } + result = &v1alpha1.ValidatingAdmissionPolicyBinding{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicybindings"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 8fda84b1d..5a0a17d9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -29,6 +29,8 @@ import ( type AdmissionregistrationV1beta1Interface interface { RESTClient() rest.Interface MutatingWebhookConfigurationsGetter + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter ValidatingWebhookConfigurationsGetter } @@ -41,6 +43,14 @@ func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() Mut return newMutatingWebhookConfigurations(c) } +func (c *AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { return newValidatingWebhookConfigurations(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go index 2aeb9c98a..56ad611f4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -20,4 +20,8 @@ package v1beta1 type MutatingWebhookConfigurationExpansion interface{} 
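[Editor's aside, not part of the vendored patch: a sketch showing how a ValidatingAdmissionPolicyBinding created through the new v1alpha1 typed client references a policy by name. The binding and policy names are illustrative, as is the in-cluster config.]

package main

import (
	"context"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// A binding only references an existing policy by name; "demo-policy" is illustrative.
	binding := &admissionv1alpha1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-policy-binding"},
		Spec: admissionv1alpha1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: "demo-policy",
		},
	}
	if _, err := cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().
		Create(context.TODO(), binding, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}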
+type ValidatingAdmissionPolicyExpansion interface{} + +type ValidatingAdmissionPolicyBindingExpansion interface{} + type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..bea51b587 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. 
+type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + client rest.Interface +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. +func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Get(). + Resource("validatingadmissionpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. +func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ValidatingAdmissionPolicyList{} + err = c.client.Get(). + Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. +func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). 
+ Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Post(). + Resource("validatingadmissionpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. +func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingadmissionpolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingadmissionpolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicy. +func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Patch(pt). + Resource("validatingadmissionpolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. +func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + + result = &v1beta1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..bba37bb04 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
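[Editor's aside, not part of the vendored patch: a sketch of the server-side-apply path through the generated Apply method for the v1beta1 policy client completed above, assuming the generated apply-configuration helpers follow the usual WithSpec/WithValidations builder pattern. The field manager name and CEL expression are illustrative.]

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acadmissionv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Declarative apply: only the fields set in the apply configuration are owned
	// by the "demo-manager" field manager; everything else is left untouched.
	policy := acadmissionv1beta1.ValidatingAdmissionPolicy("demo-policy").
		WithSpec(acadmissionv1beta1.ValidatingAdmissionPolicySpec().
			WithValidations(acadmissionv1beta1.Validation().
				WithExpression("object.metadata.name != 'forbidden'"))) // illustrative CEL

	if _, err := cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().
		Apply(context.TODO(), policy, metav1.ApplyOptions{FieldManager: "demo-manager"}); err != nil {
		panic(err)
	}
}

Server-side apply always requires a field manager, which the generated Apply method forwards via opts.ToPatchOptions().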
+ +package v1beta1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. +type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + client rest.Interface +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. +func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1beta1.ValidatingAdmissionPolicyBinding{} + err = c.client.Get(). + Resource("validatingadmissionpolicybindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. 
+func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ValidatingAdmissionPolicyBindingList{} + err = c.client.Get(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. +func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1beta1.ValidatingAdmissionPolicyBinding{} + err = c.client.Post(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicyBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1beta1.ValidatingAdmissionPolicyBinding{} + err = c.client.Put(). + Resource("validatingadmissionpolicybindings"). + Name(validatingAdmissionPolicyBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicyBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. +func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingadmissionpolicybindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingadmissionpolicybindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. 
+func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + result = &v1beta1.ValidatingAdmissionPolicyBinding{} + err = c.client.Patch(pt). + Resource("validatingadmissionpolicybindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. +func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + if validatingAdmissionPolicyBinding == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicyBinding) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicyBinding.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") + } + result = &v1beta1.ValidatingAdmissionPolicyBinding{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicybindings"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index aea9d0e13..81be8b2e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1Client struct { restClient rest.Interface } +func (c *AuthenticationV1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 0413fb2b6..35f2c22b4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go new file mode 100644 index 000000000..bfb9603d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. +type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { + result = &v1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go new file mode 100644 index 000000000..187392661 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/authentication/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1alpha1Interface interface { + RESTClient() rest.Interface + SelfSubjectReviewsGetter +} + +// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.k8s.io group. 
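[Editor's aside, not part of the vendored patch: a sketch of the new authentication/v1 SelfSubjectReview client whose Create method is shown above. It assumes in-cluster credentials and a server where the v1 SelfSubjectReview API is available; it is the programmatic equivalent of `kubectl auth whoami`.]

package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// SelfSubjectReview is create-only: POST an empty object and read back
	// the identity the API server associates with the request's credentials.
	rev, err := cs.AuthenticationV1().SelfSubjectReviews().
		Create(context.TODO(), &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated as:", rev.Status.UserInfo.Username)
}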
+type AuthenticationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1alpha1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + +// NewForConfig creates a new AuthenticationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthenticationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AuthenticationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1alpha1Client { + return &AuthenticationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
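[Editor's aside, not part of the vendored patch: a sketch of constructing the standalone group client via the generated NewForConfig shown above, rather than going through the aggregated clientset. The kubeconfig path is an assumption; any rest.Config source works the same way.]

package main

import (
	"fmt"

	authv1alpha1client "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at this path.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig fills in group/version, codecs, API path and user agent
	// through setConfigDefaults, as in the generated constructor above.
	c, err := authv1alpha1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("REST client ready:", c.RESTClient() != nil)
}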
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..6a648364b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type SelfSubjectReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go new file mode 100644 index 000000000..7f8b12a46 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/authentication/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. +type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*v1alpha1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { + result = &v1alpha1.SelfSubjectReview{} + err = c.client.Post(). 
+ Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 218cb60c3..7823729e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1beta1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1beta1Client struct { restClient rest.Interface } +func (c *AuthenticationV1beta1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 60bf15ab9..527a458d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1beta1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go new file mode 100644 index 000000000..9d54826a3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
+type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + result = &v1beta1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 000000000..a9050af94 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTrustBundlesGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInterface { + return newClusterTrustBundles(c) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..970fb15e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. +// A group's client should implement this interface. +type ClusterTrustBundlesGetter interface { + ClusterTrustBundles() ClusterTrustBundleInterface +} + +// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. 
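[Editor's aside, not part of the vendored patch: a sketch of using the certificates/v1alpha1 ClusterTrustBundles client whose interface is defined just below. The bundle name and the PEM placeholder are illustrative; a server-accepted object needs a real PEM certificate bundle.]

package main

import (
	"context"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// A ClusterTrustBundle distributes a PEM bundle cluster-wide; the PEM below is a placeholder.
	bundle := &certsv1alpha1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{Name: "example-ca"},
		Spec: certsv1alpha1.ClusterTrustBundleSpec{
			TrustBundle: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
		},
	}
	if _, err := cs.CertificatesV1alpha1().ClusterTrustBundles().
		Create(context.TODO(), bundle, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}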
+type ClusterTrustBundleInterface interface { + Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + ClusterTrustBundleExpansion +} + +// clusterTrustBundles implements ClusterTrustBundleInterface +type clusterTrustBundles struct { + client rest.Interface +} + +// newClusterTrustBundles returns a ClusterTrustBundles +func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { + return &clusterTrustBundles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. +func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Get(). + Resource("clustertrustbundles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. +func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterTrustBundleList{} + err = c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTrustBundles. +func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Post(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Put(). + Resource("clustertrustbundles"). + Name(clusterTrustBundle.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. +func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertrustbundles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertrustbundles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterTrustBundle. +func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(pt). + Resource("clustertrustbundles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. +func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + if clusterTrustBundle == nil { + return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterTrustBundle) + if err != nil { + return nil, err + } + name := clusterTrustBundle.Name + if name == nil { + return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") + } + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustertrustbundles"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..43cc534b3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterTrustBundleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 7213193bf..562f8d5e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,7 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// have been created with the "" namespace. +// have been created with the "" namespace. 
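[Editorial note, not vendored code] The certificates/v1alpha1 typed client added above (ClusterTrustBundles) is consumed through the generated clientset. The following is an illustrative sketch only, assuming a hypothetical kubeconfig path and a cluster that actually serves the alpha certificates.k8s.io/v1alpha1 API (the ClusterTrustBundle feature gate is disabled by default); it is not part of this patch.

package main

import (
	"context"
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; any *rest.Config would do here.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	ctbClient := clientset.CertificatesV1alpha1().ClusterTrustBundles()

	// ClusterTrustBundles are cluster-scoped, so no namespace is involved.
	bundles, err := ctbClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d ClusterTrustBundles\n", len(bundles.Items))

	// Create a bundle; TrustBundle holds PEM-encoded certificates (placeholder below).
	_, err = ctbClient.Create(ctx, &certsv1alpha1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{Name: "example-bundle"},
		Spec: certsv1alpha1.ClusterTrustBundleSpec{
			TrustBundle: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
}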
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514df..4725d2cd1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f041..67fcf4992 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index f24099b90..978b26db0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface { type NetworkPolicyInterface interface { Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) - UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) @@ -51,7 +50,6 @@ type NetworkPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) - ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx 
context.Context, networkPolicy *v1beta1.Net return } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1 Into(result) return } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c3133..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. 
-type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. 
-func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go new file mode 100644 index 000000000..09b1d25b0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta3 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go new file mode 100644 index 000000000..461120bd3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "net/http" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta3Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta3Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta3Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta3Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta3Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta3Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta3Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta3Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta3Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta3Client { + return &FlowcontrolV1beta3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1beta3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go new file mode 100644 index 000000000..5fa39d6ba --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. 
+type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta3.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { + result = &v1beta3.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta3.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { + result = &v1beta3.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + result = &v1beta3.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + result = &v1beta3.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { + result = &v1beta3.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + result = &v1beta3.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + + result = &v1beta3.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go new file mode 100644 index 000000000..51ae8d1f3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..49f05257c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. 
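[Editorial note, not vendored code] The FlowSchema client above and the PriorityLevelConfiguration client that follows belong to the new flowcontrol/v1beta3 group client. A minimal usage sketch, assuming the program runs in-cluster with a service account and that the API server serves flowcontrol.apiserver.k8s.io/v1beta3 (Kubernetes 1.26+); it is not part of this patch.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes in-cluster execution; clientcmd could be used outside a pod instead.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	fc := clientset.FlowcontrolV1beta3()

	// Both resource types are cluster-scoped.
	schemas, err := fc.FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, fs := range schemas.Items {
		fmt.Printf("FlowSchema %s -> priority level %s\n",
			fs.Name, fs.Spec.PriorityLevelConfiguration.Name)
	}

	plcs, err := fc.PriorityLevelConfigurations().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d PriorityLevelConfigurations\n", len(plcs.Items))
}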
+type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. +type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta3.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta3.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. 
+func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. 
+func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + + result = &v1beta3.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 97afd6278..d7454ce14 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface { type NetworkPolicyInterface interface { Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) - UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) @@ -51,7 +50,6 @@ type NetworkPolicyInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) - ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkP return } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). @@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1 Into(result) return } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). 
- Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go new file mode 100644 index 000000000..9df76351d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. +// A group's client should implement this interface. +type ClusterCIDRsGetter interface { + ClusterCIDRs() ClusterCIDRInterface +} + +// ClusterCIDRInterface has methods to work with ClusterCIDR resources. +type ClusterCIDRInterface interface { + Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) + Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) + Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) + ClusterCIDRExpansion +} + +// clusterCIDRs implements ClusterCIDRInterface +type clusterCIDRs struct { + client rest.Interface +} + +// newClusterCIDRs returns a ClusterCIDRs +func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { + return &clusterCIDRs{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. +func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Get(). + Resource("clustercidrs"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. +func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterCIDRList{} + err = c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterCIDRs. +func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Post(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Put(). + Resource("clustercidrs"). + Name(clusterCIDR.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. +func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustercidrs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustercidrs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterCIDR. +func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(pt). + Resource("clustercidrs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. 
+func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { + if clusterCIDR == nil { + return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterCIDR) + if err != nil { + return nil, err + } + name := clusterCIDR.Name + if name == nil { + return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") + } + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustercidrs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..9c2979d6c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterCIDRExpansion interface{} + +type IPAddressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..fff193d68 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + client rest.Interface +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { + return &iPAddresses{ + client: c.RESTClient(), + } +} + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Get(). + Resource("ipaddresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. +func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IPAddressList{} + err = c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. 
+func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Post(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Put(). + Resource("ipaddresses"). + Name(iPAddress.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ipaddresses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ipaddresses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Patch(pt). + Resource("ipaddresses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. +func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + result = &v1alpha1.IPAddress{} + err = c.client.Patch(types.ApplyPatchType). + Resource("ipaddresses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go new file mode 100644 index 000000000..884c846f5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterCIDRsGetter + IPAddressesGetter +} + +// NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { + return newClusterCIDRs(c) +} + +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + +// NewForConfig creates a new NetworkingV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *NetworkingV1alpha1Client { + return &NetworkingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go new file mode 100644 index 000000000..baaf2d985 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go new file mode 100644 index 000000000..2c02e9ce7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +type PodSchedulingContextExpansion interface{} + +type ResourceClaimExpansion interface{} + +type ResourceClaimTemplateExpansion interface{} + +type ResourceClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..72e81a29e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. +type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + client rest.Interface + ns string +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. +func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. +func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.PodSchedulingContextList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSchedulingContexts. +func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. +func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podSchedulingContext. +func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. +func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go new file mode 100644 index 000000000..d5795fd62 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "net/http" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ResourceV1alpha2Interface interface { + RESTClient() rest.Interface + PodSchedulingContextsGetter + ResourceClaimsGetter + ResourceClaimTemplatesGetter + ResourceClassesGetter +} + +// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha2Client struct { + restClient rest.Interface +} + +func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) +} + +func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) +} + +func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { + return newResourceClaimTemplates(c, namespace) +} + +func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { + return newResourceClasses(c) +} + +// NewForConfig creates a new ResourceV1alpha2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResourceV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResourceV1alpha2Client for the given RESTClient. 
+func New(c rest.Interface) *ResourceV1alpha2Client { + return &ResourceV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go new file mode 100644 index 000000000..cfb27c9db --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. 
+type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + client rest.Interface + ns string +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { + return &resourceClaims{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. +func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. +func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ResourceClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceClaims. +func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourceclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. 
+func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourceclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClaim). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. +func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceclaims"). + Name(resourceClaim.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClaim). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceclaims"). + Name(resourceClaim.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClaim). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. +func (c *resourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceclaims"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceclaims"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceClaim. +func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourceclaims"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. 
+func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { + if resourceClaim == nil { + return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(resourceClaim) + if err != nil { + return nil, err + } + name := resourceClaim.Name + if name == nil { + return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") + } + result = &v1alpha2.ResourceClaim{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("resourceclaims"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { + if resourceClaim == nil { + return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(resourceClaim) + if err != nil { + return nil, err + } + + name := resourceClaim.Name + if name == nil { + return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") + } + + result = &v1alpha2.ResourceClaim{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("resourceclaims"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go new file mode 100644 index 000000000..3f4e32006 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go @@ -0,0 +1,208 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
+type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + client rest.Interface + ns string +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. +func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. +func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ResourceClaimTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. +func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. 
+func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClaimTemplate). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. +func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + Name(resourceClaimTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClaimTemplate). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. +func (c *resourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceClaimTemplate. +func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. +func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + if resourceClaimTemplate == nil { + return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(resourceClaimTemplate) + if err != nil { + return nil, err + } + name := resourceClaimTemplate.Name + if name == nil { + return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") + } + result = &v1alpha2.ResourceClaimTemplate{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("resourceclaimtemplates"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go new file mode 100644 index 000000000..95a4ac566 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ResourceClassesGetter has a method to return a ResourceClassInterface. +// A group's client should implement this interface. +type ResourceClassesGetter interface { + ResourceClasses() ResourceClassInterface +} + +// ResourceClassInterface has methods to work with ResourceClass resources. +type ResourceClassInterface interface { + Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) + Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) + Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) + ResourceClassExpansion +} + +// resourceClasses implements ResourceClassInterface +type resourceClasses struct { + client rest.Interface +} + +// newResourceClasses returns a ResourceClasses +func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { + return &resourceClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. +func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} + err = c.client.Get(). + Resource("resourceclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. +func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ResourceClassList{} + err = c.client.Get(). + Resource("resourceclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceClasses. +func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("resourceclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. +func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} + err = c.client.Post(). + Resource("resourceclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. +func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} + err = c.client.Put(). + Resource("resourceclasses"). + Name(resourceClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. +func (c *resourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("resourceclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("resourceclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceClass. +func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} + err = c.client.Patch(pt). + Resource("resourceclasses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. 
+func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { + if resourceClass == nil { + return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(resourceClass) + if err != nil { + return nil, err + } + name := resourceClass.Name + if name == nil { + return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") + } + result = &v1alpha2.ResourceClass{} + err = c.client.Patch(types.ApplyPatchType). + Resource("resourceclasses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..3f8b7819c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyLister. +type ValidatingAdmissionPolicyListerExpansion interface{} + +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyBindingLister. +type ValidatingAdmissionPolicyBindingListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..ae500183a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. +// All objects returned here must be treated as read-only. 
+type ValidatingAdmissionPolicyLister interface { + // List lists all ValidatingAdmissionPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) + // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) + ValidatingAdmissionPolicyListerExpansion +} + +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. +type validatingAdmissionPolicyLister struct { + indexer cache.Indexer +} + +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { + return &validatingAdmissionPolicyLister{indexer: indexer} +} + +// List lists all ValidatingAdmissionPolicies in the indexer. +func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicy)) + }) + return ret, err +} + +// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. +func (s *validatingAdmissionPolicyLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicy"), name) + } + return obj.(*v1alpha1.ValidatingAdmissionPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..552854daf --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. +// All objects returned here must be treated as read-only. +type ValidatingAdmissionPolicyBindingLister interface { + // List lists all ValidatingAdmissionPolicyBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) + // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + ValidatingAdmissionPolicyBindingListerExpansion +} + +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. +type validatingAdmissionPolicyBindingLister struct { + indexer cache.Indexer +} + +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. +func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { + return &validatingAdmissionPolicyBindingLister{indexer: indexer} +} + +// List lists all ValidatingAdmissionPolicyBindings in the indexer. +func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicyBinding)) + }) + return ret, err +} + +// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. +func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicybinding"), name) + } + return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go index 8960abc4f..7148781f4 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go @@ -22,6 +22,14 @@ package v1beta1 // MutatingWebhookConfigurationLister. type MutatingWebhookConfigurationListerExpansion interface{} +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyLister. +type ValidatingAdmissionPolicyListerExpansion interface{} + +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyBindingLister. +type ValidatingAdmissionPolicyBindingListerExpansion interface{} + // ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to // ValidatingWebhookConfigurationLister. type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..7018b3cee --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. +// All objects returned here must be treated as read-only. +type ValidatingAdmissionPolicyLister interface { + // List lists all ValidatingAdmissionPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) + // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) + ValidatingAdmissionPolicyListerExpansion +} + +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. +type validatingAdmissionPolicyLister struct { + indexer cache.Indexer +} + +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { + return &validatingAdmissionPolicyLister{indexer: indexer} +} + +// List lists all ValidatingAdmissionPolicies in the indexer. +func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicy)) + }) + return ret, err +} + +// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. +func (s *validatingAdmissionPolicyLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicy"), name) + } + return obj.(*v1beta1.ValidatingAdmissionPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..5fcebfd22 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. +// All objects returned here must be treated as read-only. +type ValidatingAdmissionPolicyBindingLister interface { + // List lists all ValidatingAdmissionPolicyBindings in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) + // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + ValidatingAdmissionPolicyBindingListerExpansion +} + +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. +type validatingAdmissionPolicyBindingLister struct { + indexer cache.Indexer +} + +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. +func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { + return &validatingAdmissionPolicyBindingLister{indexer: indexer} +} + +// List lists all ValidatingAdmissionPolicyBindings in the indexer. +func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicyBinding)) + }) + return ret, err +} + +// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. +func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicybinding"), name) + } + return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..b8049a761 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterTrustBundleLister helps list ClusterTrustBundles. +// All objects returned here must be treated as read-only. +type ClusterTrustBundleLister interface { + // List lists all ClusterTrustBundles in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) + // Get retrieves the ClusterTrustBundle from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterTrustBundle, error) + ClusterTrustBundleListerExpansion +} + +// clusterTrustBundleLister implements the ClusterTrustBundleLister interface. 
+type clusterTrustBundleLister struct { + indexer cache.Indexer +} + +// NewClusterTrustBundleLister returns a new ClusterTrustBundleLister. +func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister { + return &clusterTrustBundleLister{indexer: indexer} +} + +// List lists all ClusterTrustBundles in the indexer. +func (s *clusterTrustBundleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterTrustBundle)) + }) + return ret, err +} + +// Get retrieves the ClusterTrustBundle from the index for a given name. +func (s *clusterTrustBundleLister) Get(name string) (*v1alpha1.ClusterTrustBundle, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clustertrustbundle"), name) + } + return obj.(*v1alpha1.ClusterTrustBundle), nil +} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..d77258cb2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleListerExpansion allows custom methods to be added to +// ClusterTrustBundleLister. +type ClusterTrustBundleListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index 5599219d9..4c65dbf76 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -41,7 +41,3 @@ type NetworkPolicyListerExpansion interface{} // NetworkPolicyNamespaceListerExpansion allows custom methods to be added to // NetworkPolicyNamespaceLister. type NetworkPolicyNamespaceListerExpansion interface{} - -// PodSecurityPolicyListerExpansion allows custom methods to be added to -// PodSecurityPolicyLister. -type PodSecurityPolicyListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 5f6a8c036..000000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodSecurityPolicyLister helps list PodSecurityPolicies. -// All objects returned here must be treated as read-only. -type PodSecurityPolicyLister interface { - // List lists all PodSecurityPolicies in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) - // Get retrieves the PodSecurityPolicy from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PodSecurityPolicy, error) - PodSecurityPolicyListerExpansion -} - -// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. -type podSecurityPolicyLister struct { - indexer cache.Indexer -} - -// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. -func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { - return &podSecurityPolicyLister{indexer: indexer} -} - -// List lists all PodSecurityPolicies in the indexer. -func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) - }) - return ret, err -} - -// Get retrieves the PodSecurityPolicy from the index for a given name. -func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) - } - return obj.(*v1beta1.PodSecurityPolicy), nil -} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go new file mode 100644 index 000000000..5c14f337b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta3 + +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. 
+type PriorityLevelConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go new file mode 100644 index 000000000..ef01b5a76 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +// All objects returned here must be treated as read-only. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta3.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta3.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1beta3.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta3.Resource("flowschema"), name) + } + return obj.(*v1beta3.FlowSchema), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..d05613949 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +// All objects returned here must be treated as read-only. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta3.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta3.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. +func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta3.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta3.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1beta3.PriorityLevelConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go new file mode 100644 index 000000000..dca9d7bf0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterCIDRLister helps list ClusterCIDRs. +// All objects returned here must be treated as read-only. +type ClusterCIDRLister interface { + // List lists all ClusterCIDRs in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) + // Get retrieves the ClusterCIDR from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterCIDR, error) + ClusterCIDRListerExpansion +} + +// clusterCIDRLister implements the ClusterCIDRLister interface. +type clusterCIDRLister struct { + indexer cache.Indexer +} + +// NewClusterCIDRLister returns a new ClusterCIDRLister. +func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister { + return &clusterCIDRLister{indexer: indexer} +} + +// List lists all ClusterCIDRs in the indexer. +func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterCIDR)) + }) + return ret, err +} + +// Get retrieves the ClusterCIDR from the index for a given name. +func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name) + } + return obj.(*v1alpha1.ClusterCIDR), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..d57b71b00 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterCIDRListerExpansion allows custom methods to be added to +// ClusterCIDRLister. +type ClusterCIDRListerExpansion interface{} + +// IPAddressListerExpansion allows custom methods to be added to +// IPAddressLister. +type IPAddressListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..b3dfe2797 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IPAddressLister helps list IPAddresses. +// All objects returned here must be treated as read-only. +type IPAddressLister interface { + // List lists all IPAddresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) + // Get retrieves the IPAddress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.IPAddress, error) + IPAddressListerExpansion +} + +// iPAddressLister implements the IPAddressLister interface. +type iPAddressLister struct { + indexer cache.Indexer +} + +// NewIPAddressLister returns a new IPAddressLister. +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { + return &iPAddressLister{indexer: indexer} +} + +// List lists all IPAddresses in the indexer. +func (s *iPAddressLister) List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IPAddress)) + }) + return ret, err +} + +// Get retrieves the IPAddress from the index for a given name. +func (s *iPAddressLister) Get(name string) (*v1alpha1.IPAddress, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("ipaddress"), name) + } + return obj.(*v1alpha1.IPAddress), nil +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go new file mode 100644 index 000000000..3b16e4429 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go @@ -0,0 +1,47 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// PodSchedulingContextListerExpansion allows custom methods to be added to +// PodSchedulingContextLister. +type PodSchedulingContextListerExpansion interface{} + +// PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to +// PodSchedulingContextNamespaceLister. +type PodSchedulingContextNamespaceListerExpansion interface{} + +// ResourceClaimListerExpansion allows custom methods to be added to +// ResourceClaimLister. +type ResourceClaimListerExpansion interface{} + +// ResourceClaimNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimNamespaceLister. +type ResourceClaimNamespaceListerExpansion interface{} + +// ResourceClaimTemplateListerExpansion allows custom methods to be added to +// ResourceClaimTemplateLister. 
+type ResourceClaimTemplateListerExpansion interface{} + +// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimTemplateNamespaceLister. +type ResourceClaimTemplateNamespaceListerExpansion interface{} + +// ResourceClassListerExpansion allows custom methods to be added to +// ResourceClassLister. +type ResourceClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..c50b3f889 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "k8s.io/api/resource/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodSchedulingContextLister helps list PodSchedulingContexts. +// All objects returned here must be treated as read-only. +type PodSchedulingContextLister interface { + // List lists all PodSchedulingContexts in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. + PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister + PodSchedulingContextListerExpansion +} + +// podSchedulingContextLister implements the PodSchedulingContextLister interface. +type podSchedulingContextLister struct { + indexer cache.Indexer +} + +// NewPodSchedulingContextLister returns a new PodSchedulingContextLister. +func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { + return &podSchedulingContextLister{indexer: indexer} +} + +// List lists all PodSchedulingContexts in the indexer. +func (s *podSchedulingContextLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) + }) + return ret, err +} + +// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. +func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { + return podSchedulingContextNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. +// All objects returned here must be treated as read-only. +type PodSchedulingContextNamespaceLister interface { + // List lists all PodSchedulingContexts in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha2.PodSchedulingContext, error) + PodSchedulingContextNamespaceListerExpansion +} + +// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister +// interface. +type podSchedulingContextNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodSchedulingContexts in the indexer for a given namespace. +func (s podSchedulingContextNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) + }) + return ret, err +} + +// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. +func (s podSchedulingContextNamespaceLister) Get(name string) (*v1alpha2.PodSchedulingContext, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("podschedulingcontext"), name) + } + return obj.(*v1alpha2.PodSchedulingContext), nil +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go new file mode 100644 index 000000000..273f16af3 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "k8s.io/api/resource/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceClaimLister helps list ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimLister interface { + // List lists all ResourceClaims in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + // ResourceClaims returns an object that can list and get ResourceClaims. + ResourceClaims(namespace string) ResourceClaimNamespaceLister + ResourceClaimListerExpansion +} + +// resourceClaimLister implements the ResourceClaimLister interface. +type resourceClaimLister struct { + indexer cache.Indexer +} + +// NewResourceClaimLister returns a new ResourceClaimLister. +func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { + return &resourceClaimLister{indexer: indexer} +} + +// List lists all ResourceClaims in the indexer. 
+func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceClaim)) + }) + return ret, err +} + +// ResourceClaims returns an object that can list and get ResourceClaims. +func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { + return resourceClaimNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceClaimNamespaceLister helps list and get ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimNamespaceLister interface { + // List lists all ResourceClaims in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + // Get retrieves the ResourceClaim from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha2.ResourceClaim, error) + ResourceClaimNamespaceListerExpansion +} + +// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister +// interface. +type resourceClaimNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceClaims in the indexer for a given namespace. +func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceClaim)) + }) + return ret, err +} + +// Get retrieves the ResourceClaim from the indexer for a given namespace and name. +func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha2.ResourceClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaim"), name) + } + return obj.(*v1alpha2.ResourceClaim), nil +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go new file mode 100644 index 000000000..91a488b17 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "k8s.io/api/resource/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateLister helps list ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateLister interface { + // List lists all ResourceClaimTemplates in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. + ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister + ResourceClaimTemplateListerExpansion +} + +// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. +type resourceClaimTemplateLister struct { + indexer cache.Indexer +} + +// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. +func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { + return &resourceClaimTemplateLister{indexer: indexer} +} + +// List lists all ResourceClaimTemplates in the indexer. +func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) + }) + return ret, err +} + +// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. +func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { + return resourceClaimTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateNamespaceLister interface { + // List lists all ResourceClaimTemplates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha2.ResourceClaimTemplate, error) + ResourceClaimTemplateNamespaceListerExpansion +} + +// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister +// interface. +type resourceClaimTemplateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceClaimTemplates in the indexer for a given namespace. +func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) + }) + return ret, err +} + +// Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. +func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimTemplate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimtemplate"), name) + } + return obj.(*v1alpha2.ResourceClaimTemplate), nil +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go new file mode 100644 index 000000000..eeb2fc337 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "k8s.io/api/resource/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceClassLister helps list ResourceClasses. +// All objects returned here must be treated as read-only. +type ResourceClassLister interface { + // List lists all ResourceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) + // Get retrieves the ResourceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha2.ResourceClass, error) + ResourceClassListerExpansion +} + +// resourceClassLister implements the ResourceClassLister interface. +type resourceClassLister struct { + indexer cache.Indexer +} + +// NewResourceClassLister returns a new ResourceClassLister. +func NewResourceClassLister(indexer cache.Indexer) ResourceClassLister { + return &resourceClassLister{indexer: indexer} +} + +// List lists all ResourceClasses in the indexer. +func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceClass)) + }) + return ret, err +} + +// Get retrieves the ResourceClass from the index for a given name. +func (s *resourceClassLister) Get(name string) (*v1alpha2.ResourceClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclass"), name) + } + return obj.(*v1alpha2.ResourceClass), nil +} diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 000000000..e61009424 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/openapi/client.go b/vendor/k8s.io/client-go/openapi/client.go index 7b58762ac..6a4305718 100644 --- a/vendor/k8s.io/client-go/openapi/client.go +++ b/vendor/k8s.io/client-go/openapi/client.go @@ -19,6 +19,7 @@ package openapi import ( "context" "encoding/json" + "strings" "k8s.io/client-go/rest" "k8s.io/kube-openapi/pkg/handler3" @@ -58,7 +59,11 @@ func (c *client) Paths() (map[string]GroupVersion, error) { // Create GroupVersions for each element of the result result := map[string]GroupVersion{} for k, v := range discoMap.Paths { - result[k] = newGroupVersion(c, v) + // If the server returned a URL rooted at /openapi/v3, preserve any additional client-side prefix. + // If the server returned a URL not rooted at /openapi/v3, treat it as an actual server-relative URL. 
+ // See https://github.com/kubernetes/kubernetes/issues/117463 for details + useClientPrefix := strings.HasPrefix(v.ServerRelativeURL, "/openapi/v3") + result[k] = newGroupVersion(c, v, useClientPrefix) } return result, nil } diff --git a/vendor/k8s.io/client-go/openapi/groupversion.go b/vendor/k8s.io/client-go/openapi/groupversion.go index 7c35833b4..601dcbe3c 100644 --- a/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/vendor/k8s.io/client-go/openapi/groupversion.go @@ -18,42 +18,53 @@ package openapi import ( "context" + "net/url" - openapi_v3 "github.com/google/gnostic/openapiv3" - "google.golang.org/protobuf/proto" "k8s.io/kube-openapi/pkg/handler3" ) -const openAPIV3mimePb = "application/com.github.proto-openapi.spec.v3@v1.0+protobuf" +const ContentTypeOpenAPIV3PB = "application/com.github.proto-openapi.spec.v3@v1.0+protobuf" type GroupVersion interface { - Schema() (*openapi_v3.Document, error) + Schema(contentType string) ([]byte, error) } type groupversion struct { - client *client - item handler3.OpenAPIV3DiscoveryGroupVersion + client *client + item handler3.OpenAPIV3DiscoveryGroupVersion + useClientPrefix bool } -func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion) *groupversion { - return &groupversion{client: client, item: item} +func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion, useClientPrefix bool) *groupversion { + return &groupversion{client: client, item: item, useClientPrefix: useClientPrefix} } -func (g *groupversion) Schema() (*openapi_v3.Document, error) { - data, err := g.client.restClient.Get(). - RequestURI(g.item.ServerRelativeURL). - SetHeader("Accept", openAPIV3mimePb). - Do(context.TODO()). - Raw() +func (g *groupversion) Schema(contentType string) ([]byte, error) { + if !g.useClientPrefix { + return g.client.restClient.Get(). + RequestURI(g.item.ServerRelativeURL). + SetHeader("Accept", contentType). + Do(context.TODO()). + Raw() + } + locator, err := url.Parse(g.item.ServerRelativeURL) if err != nil { return nil, err } - document := &openapi_v3.Document{} - if err := proto.Unmarshal(data, document); err != nil { - return nil, err + path := g.client.restClient.Get(). + AbsPath(locator.Path). + SetHeader("Accept", contentType) + + // Other than root endpoints(openapiv3/apis), resources have hash query parameter to support etags. + // However, absPath does not support handling query parameters internally, + // so that hash query parameter is added manually + for k, value := range locator.Query() { + for _, v := range value { + path.Param(k, v) + } } - return document, nil + return path.Do(context.TODO()).Raw() } diff --git a/vendor/k8s.io/client-go/openapi/typeconverter.go b/vendor/k8s.io/client-go/openapi/typeconverter.go new file mode 100644 index 000000000..4b91e66d4 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/typeconverter.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +func NewTypeConverter(client Client, preserveUnknownFields bool) (managedfields.TypeConverter, error) { + spec := map[string]*spec.Schema{} + paths, err := client.Paths() + if err != nil { + return nil, fmt.Errorf("failed to list paths: %w", err) + } + for _, gv := range paths { + s, err := gv.Schema("application/json") + if err != nil { + return nil, fmt.Errorf("failed to download schema: %w", err) + } + var openapi spec3.OpenAPI + if err := json.Unmarshal(s, &openapi); err != nil { + return nil, fmt.Errorf("failed to parse schema: %w", err) + } + for k, v := range openapi.Components.Schemas { + spec[k] = v + } + } + return managedfields.NewTypeConverter(spec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 1b6322da5..dae1bf1b0 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -98,6 +98,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. // diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go index 0c4754dd2..b9082e814 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go @@ -96,6 +96,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. 
// diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go index 82fe94ca9..9c3f63fca 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go @@ -86,6 +86,7 @@ func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *cl out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { return err } @@ -103,6 +104,7 @@ func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentica out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { return err } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index 714b0273a..918eb11e1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -96,6 +96,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. 
// diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 8c4d43fea..5372da151 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -86,6 +86,7 @@ func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, ou out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { return err } @@ -103,6 +104,7 @@ func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthe out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { return err } diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 51e34dda3..676d51d32 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -43,7 +43,8 @@ var ( gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning + // https://kubernetes.io/releases/version-skew-policy/ // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index d37dfbf73..b471f5cc6 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -32,12 +32,12 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" "golang.org/x/term" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/dump" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/install" @@ -81,8 +81,6 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } -var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} - func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { key := struct { conf *api.ExecConfig @@ -91,7 +89,7 @@ func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) strin conf: conf, cluster: cluster, } - return spewConfig.Sprint(key) + return dump.Pretty(key) } type cache struct { @@ -199,14 +197,18 @@ func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecC now: time.Now, environ: os.Environ, - defaultDialer: defaultDialer, - connTracker: connTracker, + connTracker: connTracker, } for _, env := range config.Env { a.env = append(a.env, 
env.Name+"="+env.Value) } + // these functions are made comparable and stored in the cache so that repeated clientset + // construction with the same rest.Config results in a single TLS cache and Authenticator + a.getCert = &transport.GetCertHolder{GetCert: a.cert} + a.dial = &transport.DialHolder{Dial: defaultDialer.DialContext} + return c.put(key, a), nil } @@ -261,8 +263,6 @@ type Authenticator struct { now func() time.Time environ func() []string - // defaultDialer is used for clients which don't specify a custom dialer - defaultDialer *connrotation.Dialer // connTracker tracks all connections opened that we need to close when rotating a client certificate connTracker *connrotation.ConnectionTracker @@ -273,6 +273,12 @@ type Authenticator struct { mu sync.Mutex cachedCreds *credentials exp time.Time + + // getCert makes Authenticator.cert comparable to support TLS config caching + getCert *transport.GetCertHolder + // dial is used for clients which do not specify a custom dialer + // it is comparable to support TLS config caching + dial *transport.DialHolder } type credentials struct { @@ -300,18 +306,21 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { if c.HasCertCallback() { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") } - c.TLS.GetCert = a.cert + c.TLS.GetCertHolder = a.getCert // comparable for TLS config caching + + if c.DialHolder != nil { + if c.DialHolder.Dial == nil { + return errors.New("invalid transport.Config.DialHolder: wrapped Dial function is nil") + } - var d *connrotation.Dialer - if c.Dial != nil { // if c has a custom dialer, we have to wrap it - d = connrotation.NewDialerWithTracker(c.Dial, a.connTracker) + // TLS config caching is not supported for this config + d := connrotation.NewDialerWithTracker(c.DialHolder.Dial, a.connTracker) + c.DialHolder = &transport.DialHolder{Dial: d.DialContext} } else { - d = a.defaultDialer + c.DialHolder = a.dial // comparable for TLS config caching } - c.Dial = d.DialContext - return nil } diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index c96930049..60df7e568 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -52,7 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header @@ -159,13 +159,14 @@ func readExpBackoffConfig() BackoffManager { // c, err := NewRESTClient(...) // if err != nil { ... } // resp, err := c.Verb("GET"). -// Path("pods"). -// SelectorParam("labels", "area=staging"). -// Timeout(10*time.Second). -// Do() +// +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// // if err != nil { ... 
} // list, ok := resp.(*api.PodList) -// func (c *RESTClient) Verb(verb string) *Request { return NewRequest(c).Verb(verb) } diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 4909dc53a..f8ff7e928 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -317,7 +316,7 @@ func RESTClientFor(config *Config) (*RESTClient, error) { // Validate config.Host before constructing the transport/client so we can fail fast. // ServerURL will be obtained later in RESTClientForConfigAndClient() - _, _, err := defaultServerUrlFor(config) + _, _, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -344,7 +343,7 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + baseURL, versionedAPIPath, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -391,7 +390,7 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { // Validate config.Host before constructing the transport/client so we can fail fast. // ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient() - _, _, err := defaultServerUrlFor(config) + _, _, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -411,7 +410,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + baseURL, versionedAPIPath, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -519,7 +518,7 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - token, err := ioutil.ReadFile(tokenFile) + token, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -549,7 +548,7 @@ func InClusterConfig() (*Config, error) { // Note: the Insecure flag is ignored when testing for this value, so MITM attacks are // still possible. 
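The io/ioutil call sites in this file move to the equivalents that have lived in os and io since Go 1.16; behavior is unchanged. A trivial standalone sketch of the replacement used for the in-cluster token read:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadFile is the direct replacement for the deprecated ioutil.ReadFile.
	token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		fmt.Println("not running inside a cluster:", err)
		return
	}
	fmt.Println("service account token bytes:", len(token))
}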
func IsConfigTransportTLS(config Config) bool { - baseURL, _, err := defaultServerUrlFor(&config) + baseURL, _, err := DefaultServerUrlFor(&config) if err != nil { return false } @@ -572,10 +571,7 @@ func LoadTLSFiles(c *Config) error { } c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil + return err } // dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, @@ -585,7 +581,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/k8s.io/client-go/rest/exec.go b/vendor/k8s.io/client-go/rest/exec.go index 5f3b43c55..96f5dba35 100644 --- a/vendor/k8s.io/client-go/rest/exec.go +++ b/vendor/k8s.io/client-go/rest/exec.go @@ -21,7 +21,6 @@ import ( "net/http" "net/url" - "k8s.io/client-go/pkg/apis/clientauthentication" clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication" ) @@ -50,12 +49,13 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro } } - return &clientauthentication.Cluster{ + return &clientauthenticationapi.Cluster{ Server: config.Host, TLSServerName: config.ServerName, InsecureSkipTLSVerify: config.Insecure, CertificateAuthorityData: caData, ProxyURL: proxyURL, + DisableCompression: config.DisableCompression, Config: config.ExecProvider.Config, }, nil } @@ -63,7 +63,7 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro // ExecClusterToConfig creates a Config with the corresponding fields from the provided // clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have // any authentication-related fields set). -func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) { +func ExecClusterToConfig(cluster *clientauthenticationapi.Cluster) (*Config, error) { var proxy func(*http.Request) (*url.URL, error) if cluster.ProxyURL != "" { proxyURL, err := url.Parse(cluster.ProxyURL) @@ -80,6 +80,7 @@ func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) ServerName: cluster.TLSServerName, CAData: cluster.CertificateAuthorityData, }, - Proxy: proxy, + Proxy: proxy, + DisableCompression: cluster.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index c2b3dfc0f..ae5cbdc2c 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -36,9 +36,10 @@ type AuthProvider interface { } // Factory generates an AuthProvider plugin. -// clusterAddress is the address of the current cluster. -// config is the initial configuration for this plugin. -// persister allows the plugin to save updated configuration. +// +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. 
type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) // AuthProviderConfigPersister allows a plugin to persist configuration info diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 3a1560df0..850e57dae 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -22,10 +22,11 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "mime" "net/http" + "net/http/httptrace" "net/url" + "os" "path" "reflect" "strconv" @@ -34,6 +35,7 @@ import ( "time" "golang.org/x/net/http2" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -116,8 +118,11 @@ type Request struct { subresource string // output - err error - body io.Reader + err error + + // only one of body / bodyBytes may be set. requests using body are not retriable. + body io.Reader + bodyBytes []byte retryFn requestRetryFunc } @@ -437,18 +442,21 @@ func (r *Request) Body(obj interface{}) *Request { } switch t := obj.(type) { case string: - data, err := ioutil.ReadFile(t) + data, err := os.ReadFile(t) if err != nil { r.err = err return r } glogBody("Request Body", data) - r.body = bytes.NewReader(data) + r.body = nil + r.bodyBytes = data case []byte: glogBody("Request Body", t) - r.body = bytes.NewReader(t) + r.body = nil + r.bodyBytes = t case io.Reader: r.body = t + r.bodyBytes = nil case runtime.Object: // callers may pass typed interface pointers, therefore we must check nil with reflection if reflect.ValueOf(t).IsNil() { @@ -465,7 +473,8 @@ func (r *Request) Body(obj interface{}) *Request { return r } glogBody("Request Body", data) - r.body = bytes.NewReader(data) + r.body = nil + r.bodyBytes = data r.SetHeader("Content-Type", r.c.content.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) @@ -473,7 +482,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// URL returns the current working URL. +// Error returns any error encountered constructing the request, if any. +func (r *Request) Error() error { + return r.err +} + +// URL returns the current working URL. Check the result of Error() to ensure +// that the returned URL is valid. func (r *Request) URL() *url.URL { p := r.pathPrefix if r.namespaceSet && len(r.namespace) > 0 { @@ -508,6 +523,87 @@ func (r *Request) URL() *url.URL { return finalURL } +// finalURLTemplate is similar to URL(), but will make all specific parameter values equal +// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query +// parameters will be reset. This creates a copy of the url so as not to change the +// underlying object. 
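Splitting body (an opaque io.Reader) from bodyBytes is what decides retriability: byte payloads can be wrapped in a fresh bytes.Reader for every attempt, while a caller-supplied reader cannot safely be rewound, so such requests are no longer retried. A hedged sketch of the two Body() call styles; the createPod name is illustrative, and the rest.Interface client and podJSON payload are assumed to come from elsewhere:

package example

import (
	"bytes"
	"context"

	"k8s.io/client-go/rest"
)

func createPod(ctx context.Context, client rest.Interface, podJSON []byte) error {
	// []byte payload: stored as bodyBytes, so the request stays retriable.
	if err := client.Post().
		AbsPath("/api/v1/namespaces/default/pods").
		Body(podJSON).
		Do(ctx).Error(); err != nil {
		return err
	}

	// io.Reader payload: opaque body, so the client will not retry this request.
	return client.Post().
		AbsPath("/api/v1/namespaces/default/pods").
		Body(bytes.NewReader(podJSON)).
		Do(ctx).Error()
}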
+func (r Request) finalURLTemplate() url.URL { + newParams := url.Values{} + v := []string{"{value}"} + for k := range r.params { + newParams[k] = v + } + r.params = newParams + u := r.URL() + if u == nil { + return url.URL{} + } + + segments := strings.Split(u.Path, "/") + groupIndex := 0 + index := 0 + trimmedBasePath := "" + if r.c.base != nil && strings.Contains(u.Path, r.c.base.Path) { + p := strings.TrimPrefix(u.Path, r.c.base.Path) + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + // store the base path that we have trimmed so we can append it + // before returning the URL + trimmedBasePath = r.c.base.Path + segments = strings.Split(p, "/") + groupIndex = 1 + } + if len(segments) <= 2 { + return *u + } + + const CoreGroupPrefix = "api" + const NamedGroupPrefix = "apis" + isCoreGroup := segments[groupIndex] == CoreGroupPrefix + isNamedGroup := segments[groupIndex] == NamedGroupPrefix + if isCoreGroup { + // checking the case of core group with /api/v1/... format + index = groupIndex + 2 + } else if isNamedGroup { + // checking the case of named group with /apis/apps/v1/... format + index = groupIndex + 3 + } else { + // this should not happen that the only two possibilities are /api... and /apis..., just want to put an + // outlet here in case more API groups are added in future if ever possible: + // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups + // if a wrong API groups name is encountered, return the {prefix} for url.Path + u.Path = "/{prefix}" + u.RawQuery = "" + return *u + } + // switch segLength := len(segments) - index; segLength { + switch { + // case len(segments) - index == 1: + // resource (with no name) do nothing + case len(segments)-index == 2: + // /$RESOURCE/$NAME: replace $NAME with {name} + segments[index+1] = "{name}" + case len(segments)-index == 3: + if segments[index+2] == "finalize" || segments[index+2] == "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+1] = "{name}" + } else { + // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace} + segments[index+1] = "{namespace}" + } + case len(segments)-index >= 4: + segments[index+1] = "{namespace}" + // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name} + if segments[index+3] != "finalize" && segments[index+3] != "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+3] = "{name}" + } + } + u.Path = path.Join(trimmedBasePath, path.Join(segments...)) + return *u +} + func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) error { if r.rateLimiter == nil { return nil @@ -537,7 +633,7 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err // but we use a throttled logger to prevent spamming. globalThrottledLogger.Infof("%s", message) } - metrics.RateLimiterLatency.Observe(ctx, r.verb, *r.URL(), latency) + metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency) return err } @@ -637,7 +733,6 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { return r.newStreamWatcher(resp) @@ -697,22 +792,36 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) ), nil } -// updateURLMetrics is a convenience function for pushing metrics. 
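finalURLTemplate keeps metric label cardinality bounded: latency metrics now see a templated path in which names, namespaces, and query values are folded into {name}, {namespace}, and {value} placeholders instead of every concrete URL. A simplified standalone sketch of the idea for a core-group resource path (not the vendored implementation):

package main

import (
	"fmt"
	"strings"
)

// templatePath folds the variable segments of a /api/v1 resource URL into
// placeholders so it can be used as a low-cardinality metric label.
func templatePath(p string) string {
	seg := strings.Split(strings.Trim(p, "/"), "/")
	// expected shape: api/v1/namespaces/$NAMESPACE/$RESOURCE/$NAME
	if len(seg) >= 4 && seg[0] == "api" && seg[2] == "namespaces" {
		seg[3] = "{namespace}"
		if len(seg) >= 6 {
			seg[5] = "{name}"
		}
	}
	return "/" + strings.Join(seg, "/")
}

func main() {
	fmt.Println(templatePath("/api/v1/namespaces/default/pods/my-pod"))
	// Output: /api/v1/namespaces/{namespace}/pods/{name}
}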
-// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, err error) { - url := "none" +// updateRequestResultMetric increments the RequestResult metric counter, +// it should be called with the (response, err) tuple from the final +// reply from the server. +func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestResult.Increment(ctx, code, req.verb, host) +} + +// updateRequestRetryMetric increments the RequestRetry metric counter, +// it should be called with the (response, err) tuple for each retry +// except for the final attempt. +func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host) +} + +func sanitize(req *Request, resp *http.Response, err error) (string, string) { + host := "none" if req.c.base != nil { - url = req.c.base.Host + host = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric // system so we just report them as ``. - if err != nil { - metrics.RequestResult.Increment(ctx, "", req.verb, url) - } else { - //Metrics for failure codes - metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url) + code := "" + if resp != nil { + code = strconv.Itoa(resp.StatusCode) } + + return code, host } // Stream formats and executes the request, and offers streaming of the response. @@ -744,11 +853,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if err != nil { return nil, err } - if r.body != nil { - req.Body = ioutil.NopCloser(r.body) - } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err != nil { // we only retry on an HTTP response with 'Retry-After' header @@ -808,25 +913,60 @@ func (r *Request) requestPreflightCheck() error { } func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) { + var body io.Reader + switch { + case r.body != nil && r.bodyBytes != nil: + return nil, fmt.Errorf("cannot set both body and bodyBytes") + case r.body != nil: + body = r.body + case r.bodyBytes != nil: + // Create a new reader specifically for this request. + // Giving each request a dedicated reader allows retries to avoid races resetting the request body. + body = bytes.NewReader(r.bodyBytes) + } + url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) + req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, newDNSMetricsTrace(ctx)), r.verb, url, body) if err != nil { return nil, err } - req = req.WithContext(ctx) req.Header = r.headers return req, nil } +// newDNSMetricsTrace returns an HTTP trace that tracks time spent on DNS lookups per host. +// This metric is available in client as "rest_client_dns_resolution_duration_seconds". 
+func newDNSMetricsTrace(ctx context.Context) *httptrace.ClientTrace { + type dnsMetric struct { + start time.Time + host string + sync.Mutex + } + dns := &dnsMetric{} + return &httptrace.ClientTrace{ + DNSStart: func(info httptrace.DNSStartInfo) { + dns.Lock() + defer dns.Unlock() + dns.start = time.Now() + dns.host = info.Host + }, + DNSDone: func(info httptrace.DNSDoneInfo) { + dns.Lock() + defer dns.Unlock() + metrics.ResolverLatency.Observe(ctx, dns.host, time.Since(dns.start)) + }, + } +} + // request connects to the server and invokes the provided function when a server response is // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. It will return an error if a problem occurred prior to connecting to the // server - the provided function is responsible for handling server errors. func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error { - //Metrics for total request latency + // Metrics for total request latency start := time.Now() defer func() { - metrics.RequestLatency.Observe(ctx, r.verb, *r.URL(), time.Since(start)) + metrics.RequestLatency.Observe(ctx, r.verb, r.finalURLTemplate(), time.Since(start)) }() if r.err != nil { @@ -881,7 +1021,6 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) // The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown. // https://pkg.go.dev/net/http#Request if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { @@ -892,7 +1031,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp done := func() bool { defer readAndCloseResponseBody(resp) - // if the the server returns an error in err, the response will be nil. + // if the server returns an error in err, the response will be nil. f := func(req *http.Request, resp *http.Response) { if resp == nil { return @@ -917,8 +1056,8 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp // processing. // // Error type: -// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError -// * http.Client.Do errors are returned directly. +// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// - http.Client.Do errors are returned directly. func (r *Request) Do(ctx context.Context) Result { var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { @@ -937,7 +1076,7 @@ func (r *Request) Do(ctx context.Context) Result { func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { - result.body, result.err = ioutil.ReadAll(resp.Body) + result.body, result.err = io.ReadAll(resp.Body) glogBody("Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) @@ -956,7 +1095,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { var body []byte if resp.Body != nil { - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) switch err.(type) { case nil: body = data @@ -1085,20 +1224,20 @@ const maxUnstructuredResponseTextBytes = 2048 // unexpected responses. 
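The new DNS resolution metric is built entirely on net/http/httptrace from the standard library; the same hook can be used outside client-go to time lookups for a single request. A minimal standalone sketch (example.com is a placeholder host):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"time"
)

func main() {
	var start time.Time
	trace := &httptrace.ClientTrace{
		DNSStart: func(info httptrace.DNSStartInfo) {
			start = time.Now()
			fmt.Println("resolving", info.Host)
		},
		DNSDone: func(info httptrace.DNSDoneInfo) {
			fmt.Println("dns lookup took", time.Since(start))
		},
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}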
The rough structure is: // // 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes -// - this is the happy path -// - when you get this output, trust what the server sends -// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to -// generate a reasonable facsimile of the original failure. -// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above -// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error -// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected -// initial contact, the presence of mismatched body contents from posted content types -// - Give these a separate distinct error type and capture as much as possible of the original message +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message // // TODO: introduce transformation of generic http.Client.Do() errors that separates 4. func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { if body == nil && resp.Body != nil { - if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { + if data, err := io.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { body = data } } @@ -1207,6 +1346,14 @@ func (r Result) StatusCode(statusCode *int) Result { return r } +// ContentType returns the "Content-Type" response header into the passed +// string, returning the Result for possible chaining. (Only valid if no +// error code was returned.) +func (r Result) ContentType(contentType *string) Result { + *contentType = r.contentType + return r +} + // Into stores the result into obj, if possible. If obj is nil it is ignored. // If the returned object is of type Status and has .Status != StatusSuccess, the // additional information in Status will be used to enrich the error. 
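Result.ContentType is a chaining accessor in the same spirit as StatusCode: it copies the response's Content-Type header out of the Result without breaking the fluent call. A hedged usage sketch; printVersionContentType is an illustrative name and the clientset is assumed to be configured elsewhere:

package example

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
)

func printVersionContentType(ctx context.Context, cs *kubernetes.Clientset) error {
	var contentType string
	body, err := cs.CoreV1().RESTClient().
		Get().
		AbsPath("/version").
		Do(ctx).
		ContentType(&contentType). // new accessor from this bump
		Raw()
	if err != nil {
		return err
	}
	fmt.Printf("got %d bytes of %s\n", len(body), contentType)
	return nil
}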
diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 7c38c6d92..53f986cbf 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -108,10 +108,13 @@ func (c *Config) TransportConfig() (*transport.Config, error) { Groups: c.Impersonate.Groups, Extra: c.Impersonate.Extra, }, - Dial: c.Dial, Proxy: c.Proxy, } + if c.Dial != nil { + conf.DialHolder = &transport.DialHolder{Dial: c.Dial} + } + if c.ExecProvider != nil && c.AuthProvider != nil { return nil, errors.New("execProvider and authProvider cannot be used in combination") } diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go index a56d1838d..c4ce6e3b8 100644 --- a/vendor/k8s.io/client-go/rest/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -77,9 +77,9 @@ func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) s return versionedAPIPath } -// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// DefaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It // requires Host and Version to be set prior to being called. -func defaultServerUrlFor(config *Config) (*url.URL, string, error) { +func DefaultServerUrlFor(config *Config) (*url.URL, string, error) { // TODO: move the default to secure when the apiserver supports TLS by default // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go index 18476f5ff..ad493659f 100644 --- a/vendor/k8s.io/client-go/rest/warnings.go +++ b/vendor/k8s.io/client-go/rest/warnings.go @@ -40,9 +40,9 @@ var ( // SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered. // By default, warnings are logged. Several built-in implementations are provided: -// - NoWarnings suppresses warnings. -// - WarningLogger logs warnings. -// - NewWarningWriter() outputs warnings to the provided writer. +// - NoWarnings suppresses warnings. +// - WarningLogger logs warnings. +// - NewWarningWriter() outputs warnings to the provided writer. func SetDefaultWarningHandler(l WarningHandler) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index 497d2608f..eaaadc6a4 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ b/vendor/k8s.io/client-go/rest/with_retry.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -154,6 +153,11 @@ func (r *withRetry) IsNextRetry(ctx context.Context, restReq *Request, httpReq * return false } + if restReq.body != nil { + // we have an opaque reader, we can't safely reset it + return false + } + r.attempts++ r.retryAfter = &RetryAfter{Attempt: r.attempts} if r.attempts > r.maxRetries { @@ -204,30 +208,19 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error { if r.retryAfter == nil { // we do a backoff sleep before the first attempt is made, // (preserving current behavior). 
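SetDefaultWarningHandler's reflowed comment lists the built-in handlers shipped in the same package; wiring one up is a one-liner. A short sketch that routes API warnings to stderr and de-duplicates repeats (option field names as in current client-go, hedged accordingly):

package main

import (
	"os"

	"k8s.io/client-go/rest"
)

func main() {
	// Route deprecation and other API warnings to stderr, de-duplicating so a
	// chatty controller does not repeat the same warning on every request.
	rest.SetDefaultWarningHandler(
		rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{
			Deduplicate: true,
		}),
	)
}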
- request.backoff.Sleep(request.backoff.CalculateBackoff(url)) - return nil - } - - // At this point we've made atleast one attempt, post which the response - // body should have been fully read and closed in order for it to be safe - // to reset the request body before we reconnect, in order for us to reuse - // the same TCP connection. - if seeker, ok := request.body.(io.Seeker); ok && request.body != nil { - if _, err := seeker.Seek(0, io.SeekStart); err != nil { - err = fmt.Errorf("failed to reset the request body while retrying a request: %v", err) - r.trackPreviousError(err) - return err + if request.backoff != nil { + request.backoff.Sleep(request.backoff.CalculateBackoff(url)) } + return nil } - // if we are here, we have made attempt(s) al least once before. + // if we are here, we have made attempt(s) at least once before. if request.backoff != nil { - // TODO(tkashem) with default set to use exponential backoff - // we can merge these two sleeps: - // BackOffManager.Sleep(max(backoffManager.CalculateBackoff(), retryAfter)) - // see https://github.com/kubernetes/kubernetes/issues/108302 - request.backoff.Sleep(r.retryAfter.Wait) - request.backoff.Sleep(request.backoff.CalculateBackoff(url)) + delay := request.backoff.CalculateBackoff(url) + if r.retryAfter.Wait > delay { + delay = r.retryAfter.Wait + } + request.backoff.Sleep(delay) } // We are retrying the request that we already send to @@ -249,8 +242,20 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp // parameters calculated from the (response, err) tuple from // attempt N-1, so r.retryAfter is outdated and should not be // referred to here. + isRetry := r.retryAfter != nil r.retryAfter = nil + // the client finishes a single request after N attempts (1..N) + // - all attempts (1..N) are counted to the rest_client_requests_total + // metric (current behavior). + // - every attempt after the first (2..N) are counted to the + // rest_client_request_retries_total metric. 
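The rewritten Before() collapses the two consecutive sleeps into one: the client now waits for whichever is longer, the locally computed backoff or the server's Retry-After, rather than sleeping for both. The selection is a plain max over durations, sketched here with an illustrative retryDelay helper:

package main

import (
	"fmt"
	"time"
)

// retryDelay mirrors the selection in Before(): sleep once, for the larger of
// the client-side backoff and the server-provided Retry-After delay.
func retryDelay(backoff, retryAfter time.Duration) time.Duration {
	if retryAfter > backoff {
		return retryAfter
	}
	return backoff
}

func main() {
	fmt.Println(retryDelay(800*time.Millisecond, 2*time.Second)) // 2s
	fmt.Println(retryDelay(3*time.Second, time.Second))          // 3s
}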
+ updateRequestResultMetric(ctx, request, resp, err) + if isRetry { + // this is attempt 2 or later + updateRequestRetryMetric(ctx, request, resp, err) + } + if request.c.base != nil { if err != nil { request.backoff.UpdateBackoff(request.URL(), err, 0) @@ -344,13 +349,21 @@ func readAndCloseResponseBody(resp *http.Response) { defer resp.Body.Close() if resp.ContentLength <= maxBodySlurpSize { - io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) + io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) } } func retryAfterResponse() *http.Response { + return retryAfterResponseWithDelay("1") +} + +func retryAfterResponseWithDelay(delay string) *http.Response { + return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay) +} + +func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response { return &http.Response{ - StatusCode: http.StatusInternalServerError, - Header: http.Header{"Retry-After": []string{"1"}}, + StatusCode: code, + Header: http.Header{"Retry-After": []string{delay}}, } } diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index 714ba90a7..7ab3cd46f 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -43,7 +43,18 @@ func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInt // KindFor fulfills meta.RESTMapper func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + // expandResourceShortcut works with current API resources as read from discovery cache. + // In case of new CRDs this means we potentially don't have current state of discovery. + // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper, + // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the + // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor). + // Thus another call to expandResourceShortcut, after a NoMatchError should successfully + // read Kind to the user or an error. + gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + if meta.IsNoMatchError(err) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + } + return gvk, err } // KindsFor fulfills meta.RESTMapper diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index 4c24f7997..5a256c1c3 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -32,7 +32,7 @@ Having a defined format allows: The file format is json, marshalled from a struct authcfg.Info. -Clinet libraries in other languages should use the same format. +Client libraries in other languages should use the same format. It is not intended to store general preferences, such as default namespace, output options, etc. CLIs (such as kubectl) and UIs should @@ -45,27 +45,26 @@ client.Client from an authcfg.Info. 
Example: - import ( - "pkg/client" - "pkg/client/auth" - ) - - info, err := auth.LoadFromFile(filename) - if err != nil { - // handle error - } - clientConfig = client.Config{} - clientConfig.Host = "example.com:4901" - clientConfig = info.MergeWithConfig() - client := client.New(clientConfig) - client.Pods(ns).List() + import ( + "pkg/client" + "pkg/client/auth" + ) + + info, err := auth.LoadFromFile(filename) + if err != nil { + // handle error + } + clientConfig = client.Config{} + clientConfig.Host = "example.com:4901" + clientConfig = info.MergeWithConfig() + client := client.New(clientConfig) + client.Pods(ns).List() */ package auth // TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. import ( "encoding/json" - "io/ioutil" "os" restclient "k8s.io/client-go/rest" @@ -90,7 +89,7 @@ func LoadFromFile(path string) (*Info, error) { if _, err := os.Stat(path); os.IsNotExist(err) { return nil, err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 726205b3d..921ac2fa0 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -2,7 +2,6 @@ approvers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k @@ -11,7 +10,6 @@ approvers: - ncdc reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k @@ -26,3 +24,5 @@ reviewers: - dims - ingvagabund - ncdc +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index ff4c22de0..8a1104bde 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -50,11 +50,12 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration @@ -84,7 +85,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -131,15 +132,18 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } @@ -199,19 +203,19 @@ func (c *controller) processLoop() { // can't return an error. 
The handlers MUST NOT modify the objects // received; this concerns not only the top level of structure but all // the data structures reachable from it. -// * OnAdd is called when an object is added. -// * OnUpdate is called when an object is modified. Note that oldObj is the -// last known state of the object-- it is possible that several changes -// were combined together, so you can't use this to see every single -// change. OnUpdate is also called when a re-list happens, and it will -// get called even if nothing changed. This is useful for periodically -// evaluating or syncing something. -// * OnDelete will get the final state of the item if it is known, otherwise -// it will get an object of type DeletedFinalStateUnknown. This can -// happen if the watch is closed and misses the delete event and we don't -// notice the deletion until the subsequent re-list. +// - OnAdd is called when an object is added. +// - OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// - OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -220,6 +224,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -227,7 +234,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -247,6 +254,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. 
+func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an @@ -258,11 +295,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -273,7 +310,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -305,15 +342,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { // notifications to be faulty. // // Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -332,16 +368,15 @@ func NewInformer( // notifications to be faulty. // // Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// * indexers is the indexer for the received object type. -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. +// - indexers is the indexer for the received object type. 
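ResourceEventHandlerDetailedFuncs is the drop-in adapter for handlers that care whether an add was part of the informer's initial list (for example, to defer expensive work until the cache is warm); the plain ResourceEventHandlerFuncs keeps its one-argument AddFunc. A hedged sketch of registering one; registerHandlers is an illustrative name and the informer is assumed to come from a shared informer factory configured elsewhere:

package example

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func registerHandlers(informer cache.SharedIndexInformer) error {
	// The detailed adapter's AddFunc also reports whether the object was
	// delivered as part of the initial list while the informer was syncing.
	_, err := informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			if isInInitialList {
				return // skip objects replayed from the initial LIST
			}
			fmt.Println("object added after the cache synced")
		},
		DeleteFunc: func(obj interface{}) {
			fmt.Println("object deleted")
		},
	})
	return err
}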
func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -355,17 +390,6 @@ func NewIndexerInformer( return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) } -// TransformFunc allows for transforming an object before it will be processed -// and put into the controller cache and before the corresponding handlers will -// be called on it. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown -// -// The most common usage pattern is to clean-up some parts of the object to -// reduce component memory usage if a given component doesn't care about them. -// given controller doesn't care for them -type TransformFunc func(interface{}) (interface{}, error) - // NewTransformingInformer returns a Store and a controller for populating // the store while also providing event notifications. You should only used // the returned Store for Get/List operations; Add/Modify/Deletes will cause @@ -413,19 +437,12 @@ func processDeltas( // Object which receives event notifications from the given deltas handler ResourceEventHandler, clientState Store, - transformer TransformFunc, deltas Deltas, + isInInitialList bool, ) error { // from oldest to newest for _, d := range deltas { obj := d.Object - if transformer != nil { - var err error - obj, err = transformer(obj) - if err != nil { - return err - } - } switch d.Type { case Sync, Replaced, Added, Updated: @@ -438,7 +455,7 @@ func processDeltas( if err := clientState.Add(obj); err != nil { return err } - handler.OnAdd(obj) + handler.OnAdd(obj, isInInitialList) } case Deleted: if err := clientState.Delete(obj); err != nil { @@ -454,16 +471,15 @@ func processDeltas( // providing event notifications. // // Parameters -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// * clientState is the store you want to populate -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. 
+// - clientState is the store you want to populate func newInformer( lw ListerWatcher, objType runtime.Object, @@ -478,6 +494,7 @@ func newInformer( fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, + Transformer: transformer, }) cfg := &Config{ @@ -487,9 +504,9 @@ func newInformer( FullResyncPeriod: resyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, transformer, deltas) + return processDeltas(h, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 2da2933ab..7160bb1ee 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -51,6 +51,10 @@ type DeltaFIFOOptions struct { // When true, `Replaced` events will be sent for items passed to a Replace() call. // When false, `Sync` events will be sent instead. EmitDeltaTypeReplaced bool + + // If set, will be called for objects before enqueueing them. Please + // see the comment on TransformFunc for details. + Transformer TransformFunc } // DeltaFIFO is like FIFO, but differs in two ways. One is that the @@ -74,11 +78,11 @@ type DeltaFIFOOptions struct { // the Pop() method. // // DeltaFIFO solves this use case: -// * You want to process every object change (delta) at most once. -// * When you process an object, you want to see everything -// that's happened to it since you last processed it. -// * You want to process the deletion of some of the objects. -// * You might want to periodically reprocess objects. +// - You want to process every object change (delta) at most once. +// - When you process an object, you want to see everything +// that's happened to it since you last processed it. +// - You want to process the deletion of some of the objects. +// - You might want to periodically reprocess objects. // // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they @@ -129,8 +133,32 @@ type DeltaFIFO struct { // emitDeltaTypeReplaced is whether to emit the Replaced or Sync // DeltaType when Replace() is called (to preserve backwards compat). emitDeltaTypeReplaced bool + + // Called with every object if non-nil. + transformer TransformFunc } +// TransformFunc allows for transforming an object before it will be processed. +// TransformFunc (similarly to ResourceEventHandler functions) should be able +// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. +// +// New in v1.27: In such cases, the contained object will already have gone +// through the transform object separately (when it was added / updated prior +// to the delete), so the TransformFunc can likely safely ignore such objects +// (i.e., just return the input object). +// +// The most common usage pattern is to clean-up some parts of the object to +// reduce component memory usage if a given component doesn't care about them. +// +// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc +// sees the object before any other actor, and it is now safe to mutate the +// object in place instead of making a copy. 
+// +// Note that TransformFunc is called while inserting objects into the +// notification queue and is therefore extremely performance sensitive; please +// do not do anything that will take a long time. +type TransformFunc func(interface{}) (interface{}, error) + // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string @@ -179,21 +207,21 @@ type Deltas []Delta // "known" keys when Pop() is called. Have to think about how that // affects error retrying. // -// NOTE: It is possible to misuse this and cause a race when using an -// external known object source. -// Whether there is a potential race depends on how the consumer -// modifies knownObjects. In Pop(), process function is called under -// lock, so it is safe to update data structures in it that need to be -// in sync with the queue (e.g. knownObjects). +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the consumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). // -// Example: -// In case of sharedIndexInformer being a consumer -// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), -// there is no race as knownObjects (s.indexer) is modified safely -// under DeltaFIFO's lock. The only exceptions are GetStore() and -// GetIndexer() methods, which expose ways to modify the underlying -// storage. Currently these two methods are used for creating Lister -// and internal tests. +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. // // Also see the comment on DeltaFIFO. // @@ -227,6 +255,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { knownObjects: opts.KnownObjects, emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + transformer: opts.Transformer, } f.cond.L = &f.lock return f @@ -271,6 +300,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -411,6 +444,21 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err if err != nil { return KeyError{obj, err} } + + // Every object comes through this code path once, so this is a good + // place to call the transform func. If obj is a + // DeletedFinalStateUnknown tombstone, then the containted inner object + // will already have gone through the transformer, but we document that + // this can happen. In cases involving Replace(), such an object can + // come through multiple times. 
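With the transformer now living in DeltaFIFOOptions, every object is transformed exactly once, as it is queued and before any handler or the store sees it; the canonical use is trimming fields nobody reads, such as metadata.managedFields, to shrink informer memory. A hedged sketch of such a transform and of wiring it into a DeltaFIFO; stripManagedFields and newTrimmedFIFO are illustrative names:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields drops metadata.managedFields before objects are cached.
// Tombstones pass through untouched: their inner object already went through
// the transform when it was added or updated.
func stripManagedFields(obj interface{}) (interface{}, error) {
	if _, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return obj, nil
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return obj, nil // not an API object; leave it alone
	}
	accessor.SetManagedFields(nil)
	return obj, nil
}

// newTrimmedFIFO wires the transform in through DeltaFIFOOptions; knownObjects
// is the informer's indexer in the usual wiring.
func newTrimmedFIFO(knownObjects cache.KeyListerGetter) *cache.DeltaFIFO {
	return cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KnownObjects:          knownObjects,
		EmitDeltaTypeReplaced: true,
		Transformer:           stripManagedFields,
	})
}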
+ if f.transformer != nil { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } + } + oldDeltas := f.items[id] newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -526,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -551,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err @@ -566,12 +615,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // using the Sync or Replace DeltaType and then (2) it does some deletions. // In particular: for every pre-existing key K that is not the key of // an object in `list` there is the effect of -// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object -// of K. If `f.knownObjects == nil` then the pre-existing keys are -// those in `f.items` and the current object of K is the `.Newest()` -// of the Deltas associated with K. Otherwise the pre-existing keys -// are those listed by `f.knownObjects` and the current object of K is -// what `f.knownObjects.GetByKey(K)` returns. +// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known +// object of K. The pre-existing keys are those in the union set of the keys in +// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is +// the one present in the last delta in `f.items`. If there is no delta for K +// in `f.items`, it is the object in `f.knownObjects` func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { f.lock.Lock() defer f.lock.Unlock() @@ -595,51 +643,23 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { } } - if f.knownObjects == nil { - // Do deletion detection against our own list. - queuedDeletions := 0 - for k, oldItem := range f.items { - if keys.Has(k) { - continue - } - // Delete pre-existing items not in the new list. - // This could happen if watch deletion event was missed while - // disconnected from apiserver. - var deletedObj interface{} - if n := oldItem.Newest(); n != nil { - deletedObj = n.Object - } - queuedDeletions++ - if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { - return err - } - } - - if !f.populated { - f.populated = true - // While there shouldn't be any queued deletions in the initial - // population of the queue, it's better to be on the safe side. - f.initialPopulationCount = keys.Len() + queuedDeletions - } - - return nil - } - - // Detect deletions not already in the queue. - knownKeys := f.knownObjects.ListKeys() + // Do deletion detection against objects in the queue queuedDeletions := 0 - for _, k := range knownKeys { + for k, oldItem := range f.items { if keys.Has(k) { continue } - - deletedObj, exists, err := f.knownObjects.GetByKey(k) - if err != nil { - deletedObj = nil - klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) - } else if !exists { - deletedObj = nil - klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + // Delete pre-existing items not in the new list. 
+ // This could happen if watch deletion event was missed while + // disconnected from apiserver. + var deletedObj interface{} + if n := oldItem.Newest(); n != nil { + deletedObj = n.Object + + // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object + if d, ok := deletedObj.(DeletedFinalStateUnknown); ok { + deletedObj = d.Obj + } } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -647,6 +667,32 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { } } + if f.knownObjects != nil { + // Detect deletions for objects not present in the queue, but present in KnownObjects + knownKeys := f.knownObjects.ListKeys() + for _, k := range knownKeys { + if keys.Has(k) { + continue + } + if len(f.items[k]) > 0 { + continue + } + + deletedObj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + deletedObj = nil + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + } else if !exists { + deletedObj = nil + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + } + queuedDeletions++ + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + } + if !f.populated { f.populated = true f.initialPopulationCount = keys.Len() + queuedDeletions diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 7abdae737..813916ebf 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,18 +20,18 @@ import ( "sync" "time" - "k8s.io/klog/v2" "k8s.io/utils/clock" ) // ExpirationCache implements the store interface -// 1. All entries are automatically time stamped on insert -// a. The key is computed based off the original item/keyFunc -// b. The value inserted under that key is the timestamped item -// 2. Expiration happens lazily on read based on the expiration policy -// a. No item can be inserted into the store while we're expiring -// *any* item in the cache. -// 3. Time-stamps are stripped off unexpired entries before return +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// // Note that the ExpirationCache is inherently slower than a normal // threadSafeStore because it takes a write lock every time it checks if // an item has expired. @@ -99,7 +99,6 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 5c9255027..dd13c4ea7 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. 
// It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -103,10 +106,11 @@ func Pop(queue Queue) interface{} { // recent version will be processed. This can't be done with a channel // // FIFO solves this use case: -// * You want to process every object (exactly) once. -// * You want to process the most recent version of the object when you process it. -// * You do not want to process deleted objects, they should be removed from the queue. -// * You do not want to periodically reprocess objects. +// - You want to process every object (exactly) once. +// - You want to process the most recent version of the object when you process it. +// - You do not want to process deleted objects, they should be removed from the queue. +// - You do not want to periodically reprocess objects. +// // Compare with DeltaFIFO for other use cases. type FIFO struct { lock sync.RWMutex @@ -148,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -286,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -297,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 1d6aae560..b78d3086b 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -28,10 +28,10 @@ import ( // Delete). // // There are three kinds of strings here: -// 1. a storage key, as defined in the Store interface, -// 2. a name of an index, and -// 3. an "indexed value", which is produced by an IndexFunc and -// can be a field value or any other string computed from the object. +// 1. a storage key, as defined in the Store interface, +// 2. a name of an index, and +// 3. an "indexed value", which is produced by an IndexFunc and +// can be a field value or any other string computed from the object. 
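The widened PopProcessFunc signature above means existing Pop callbacks need the extra isInInitialList parameter. A minimal sketch with an illustrative pod name; here the flag prints false because the item was added after construction rather than delivered by an initial Replace.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	f := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	_ = f.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-a", Namespace: "ns"}})

	// The process callback now receives a second argument reporting whether the
	// popped item was part of the queue's initial population.
	_, _ = f.Pop(func(obj interface{}, isInInitialList bool) error {
		fmt.Println(obj.(*v1.Pod).Name, isInInitialList)
		return nil
	})
}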
type Indexer interface { Store // Index returns the stored objects whose set of indexed values @@ -47,7 +47,7 @@ type Indexer interface { // ByIndex returns the stored objects whose set of indexed values // for the named index includes the given indexed value ByIndex(indexName, indexedValue string) ([]interface{}, error) - // GetIndexer return the indexers + // GetIndexers return the indexers GetIndexers() Indexers // AddIndexers adds more indexers to this store. If you call this after you already have data diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 7885d2f76..420ca7b2a 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -53,24 +53,8 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { // ListAllByNamespace used to list items belongs to namespace from Indexer. func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { - selectAll := selector.Empty() if namespace == metav1.NamespaceAll { - for _, m := range indexer.List() { - if selectAll { - // Avoid computing labels of the objects to speed up common flows - // of listing all objects. - appendFn(m) - continue - } - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - } - return nil + return ListAll(indexer, selector, appendFn) } items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) @@ -89,6 +73,8 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec } return nil } + + selectAll := selector.Empty() for _, m := range items { if selectAll { // Avoid computing labels of the objects to speed up common flows diff --git a/vendor/k8s.io/client-go/tools/cache/object-names.go b/vendor/k8s.io/client-go/tools/cache/object-names.go new file mode 100644 index 000000000..aa8dbb199 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/object-names.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/types" +) + +// ObjectName is a reference to an object of some implicit kind +type ObjectName struct { + Namespace string + Name string +} + +// NewObjectName constructs a new one +func NewObjectName(namespace, name string) ObjectName { + return ObjectName{Namespace: namespace, Name: name} +} + +// Parts is the inverse of the constructor +func (objName ObjectName) Parts() (namespace, name string) { + return objName.Namespace, objName.Name +} + +// String returns the standard string encoding, +// which is designed to match the historical behavior of MetaNamespaceKeyFunc. +// Note this behavior is different from the String method of types.NamespacedName. 
+func (objName ObjectName) String() string { + if len(objName.Namespace) > 0 { + return objName.Namespace + "/" + objName.Name + } + return objName.Name +} + +// ParseObjectName tries to parse the standard encoding +func ParseObjectName(str string) (ObjectName, error) { + var objName ObjectName + var err error + objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str) + return objName, err +} + +// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName +func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName { + return NewObjectName(nn.Namespace, nn.Name) +} + +// AsNamespacedName rebrands as a NamespacedName +func (objName ObjectName) AsNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name} +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 84f242116..45eaff528 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,7 +22,9 @@ import ( "fmt" "io" "math/rand" + "os" "reflect" + "strings" "sync" "time" @@ -40,6 +42,7 @@ import ( "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" + "k8s.io/utils/pointer" "k8s.io/utils/trace" ) @@ -49,12 +52,11 @@ const defaultExpectedTypeName = "" type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -66,15 +68,9 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // backoff manages backoff of ListWatch backoffManager wait.BackoffManager - // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. - initConnBackoffManager wait.BackoffManager - - resyncPeriod time.Duration - // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked - ShouldResync func() bool + resyncPeriod time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -89,6 +85,8 @@ type Reflector struct { isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") @@ -97,8 +95,19 @@ type Reflector struct { // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 - // Called whenever the ListAndWatch drops the connection with an error. 
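The String encoding above intentionally matches MetaNamespaceKeyFunc rather than types.NamespacedName, which keeps a leading slash for cluster-scoped names. A small sketch with illustrative names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Namespaced object: encodes as "namespace/name", the same form the store keys use.
	fmt.Println(cache.NewObjectName("kube-system", "coredns").String()) // kube-system/coredns

	// Cluster-scoped object: no leading slash, unlike types.NamespacedName.
	fmt.Println(cache.NewObjectName("", "node-1").String())      // node-1
	fmt.Println(types.NamespacedName{Name: "node-1"}.String())   // /node-1

	// Round trip through the store-key encoding.
	objName, err := cache.ParseObjectName("kube-system/coredns")
	fmt.Println(objName.AsNamespacedName(), err)
}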
- watchErrorHandler WatchErrorHandler + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. + // Streaming has the primary advantage of using fewer server's resources to fetch data. + // + // The old behaviour establishes a LIST request which gets data in chunks. + // Paginated list is less efficient and depending on the actual size of objects + // might result in an increased memory consumption of the APIServer. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details + UseWatchList bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -129,13 +138,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. - klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } @@ -153,7 +162,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". 
If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -163,49 +205,77 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. -func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), + } + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) 
+ } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) } - r.setExpectedType(expectedType) + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) + } + + if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { + r.UseWatchList = true + } + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. - gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -216,13 +286,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -252,143 +322,74 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) - var resourceVersion string - - options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} - - if err := func() error { - initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name}) - defer initTrace.LogIfLong(10 * time.Second) - var list runtime.Object - var paginatedResult bool - var err error - listCh := make(chan struct{}, 1) - panicCh := make(chan interface{}, 1) - go func() { - defer func() { - if r := recover(); r != nil { - panicCh <- r - } - }() - // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first - // list request will return the full response. 
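Since NewReflector and NewNamedReflector now both funnel into NewReflectorWithOptions, callers can also build a reflector from the options struct directly. The following sketch assumes an in-cluster config and the core v1 Pod resource; the name, type description and resync period are arbitrary illustrations.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// Name and TypeDescription are optional; when left empty they default to the
	// callsite-derived name and the Go type of the example object, as described above.
	r := cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector",
		TypeDescription: "v1.Pod",
		ResyncPeriod:    10 * time.Minute,
	})

	stop := make(chan struct{})
	go r.Run(stop)

	// Give the initial list a moment, then inspect the mirrored keys.
	time.Sleep(5 * time.Second)
	fmt.Println("pods mirrored into the store:", len(store.ListKeys()))
	close(stop)
}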
- pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { - return r.listerWatcher.List(opts) - })) - switch { - case r.WatchListPageSize != 0: - pager.PageSize = r.WatchListPageSize - case r.paginatedResult: - // We got a paginated result initially. Assume this resource and server honor - // paging requests (i.e. watch cache is probably disabled) and leave the default - // pager size set. - case options.ResourceVersion != "" && options.ResourceVersion != "0": - // User didn't explicitly request pagination. - // - // With ResourceVersion != "", we have a possibility to list from watch cache, - // but we do that (for ResourceVersion != "0") only if Limit is unset. - // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly - // switch off pagination to force listing from watch cache (if enabled). - // With the existing semantic of RV (result is at least as fresh as provided RV), - // this is correct and doesn't lead to going back in time. - // - // We also don't turn off pagination for ResourceVersion="0", since watch cache - // is ignoring Limit in that case anyway, and if watch cache is not enabled - // we don't introduce regression. - pager.PageSize = 0 - } - - list, paginatedResult, err = pager.List(context.Background(), options) - if isExpiredError(err) || isTooLargeResourceVersionError(err) { - r.setIsLastSyncResourceVersionUnavailable(true) - // Retry immediately if the resource version used to list is unavailable. - // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on - // continuation pages, but the pager might not be enabled, the full list might fail because the - // resource version it is listing at is expired or the cache may not yet be synced to the provided - // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure - // the reflector makes forward progress. - list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) - } - close(listCh) - }() - select { - case <-stopCh: + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) + var err error + var w watch.Interface + fallbackToList := !r.UseWatchList + + if r.UseWatchList { + w, err = r.watchList(stopCh) + if w == nil && err == nil { + // stopCh was closed return nil - case r := <-panicCh: - panic(r) - case <-listCh: } - initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err) - } - - // We check if the list was paginated and if so set the paginatedResult based on that. - // However, we want to do that only for the initial list (which is the only case - // when we set ResourceVersion="0"). The reasoning behind it is that later, in some - // situations we may force listing directly from etcd (by setting ResourceVersion="") - // which will return paginated result, even if watch cache is enabled. However, in - // that case, we still want to prefer sending requests to watch cache if possible. - // - // Paginated result returned for request with ResourceVersion="0" mean that watch - // cache is disabled and there are a lot of objects of a given type. In such case, - // there is no need to prefer listing from watch cache. 
- if options.ResourceVersion == "0" && paginatedResult { - r.paginatedResult = true + if !apierrors.IsInvalid(err) { + return err + } + klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + fallbackToList = true + // Ensure that we won't accidentally pass some garbage down the watch. + w = nil } + } - r.setIsLastSyncResourceVersionUnavailable(false) // list was successful - listMetaInterface, err := meta.ListAccessor(list) + if fallbackToList { + err = r.list(stopCh) if err != nil { - return fmt.Errorf("unable to understand list result %#v: %v", list, err) - } - resourceVersion = listMetaInterface.GetResourceVersion() - initTrace.Step("Resource version extracted") - items, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("unable to understand list result %#v (%v)", list, err) - } - initTrace.Step("Objects extracted") - if err := r.syncWith(items, resourceVersion); err != nil { - return fmt.Errorf("unable to sync list result: %v", err) + return err } - initTrace.Step("SyncWith done") - r.setLastSyncResourceVersion(resourceVersion) - initTrace.Step("Resource version updated") - return nil - }(); err != nil { - return err } resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) - go func() { - resyncCh, cleanup := r.resyncChan() - defer func() { - cleanup() // Call the last one written into cleanup - }() - for { - select { - case <-resyncCh: - case <-stopCh: - return - case <-cancelCh: + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + +// startResync periodically calls r.store.Resync() method. +// Note that this method is blocking and should be +// called in a separate goroutine. +func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err return } - if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return - } - } - cleanup() - resyncCh, cleanup = r.resyncChan() } - }() + cleanup() + resyncCh, cleanup = r.resyncChan() + } +} + +// watch simply starts a watch request with the server. +func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { + var err error + retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors @@ -398,48 +399,63 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { default: } - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options = metav1.ListOptions{ - ResourceVersion: resourceVersion, - // We want to avoid situations of hanging watchers. Stop any watchers that do not - // receive any events within the timeout window. - TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. 
- // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). - AllowWatchBookmarks: true, - } - // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() - w, err := r.listerWatcher.Watch(options) - if err != nil { - // If this is "connection refused" error, it means that most likely apiserver is not responsive. - // It doesn't make sense to re-list all objects because most likely we will be able to restart - // watch where we ended. - // If that's the case begin exponentially backing off and resend watch request. - // Do the same for "429" errors. - if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { - <-r.initConnBackoffManager.Backoff().C() - continue + + if w == nil { + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: r.LastSyncResourceVersion(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, + } + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) + select { + case <-stopCh: + return nil + case <-r.backoffManager.Backoff().C(): + continue + } + } + return err } - return err } - if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil { + err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + // Ensure that watch will not be reused across iterations. + w.Stop() + w = nil + retry.After(err) + if err != nil { if err != errorStopRequested { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) - <-r.initConnBackoffManager.Backoff().C() + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) + select { + case <-stopCh: + return nil + case <-r.backoffManager.Backoff().C(): + continue + } + case apierrors.IsInternalError(err) && retry.ShouldRetry(): + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -447,6 +463,222 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } } +// list simply lists all items and records a resource version obtained from the server at the moment of the call. +// the resource version can be used for further progress notification (aka. watch). +func (r *Reflector) list(stopCh <-chan struct{}) error { + var resourceVersion string + options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} + + initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + var list runtime.Object + var paginatedResult bool + var err error + listCh := make(chan struct{}, 1) + panicCh := make(chan interface{}, 1) + go func() { + defer func() { + if r := recover(); r != nil { + panicCh <- r + } + }() + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + switch { + case r.WatchListPageSize != 0: + pager.PageSize = r.WatchListPageSize + case r.paginatedResult: + // We got a paginated result initially. Assume this resource and server honor + // paging requests (i.e. watch cache is probably disabled) and leave the default + // pager size set. + case options.ResourceVersion != "" && options.ResourceVersion != "0": + // User didn't explicitly request pagination. + // + // With ResourceVersion != "", we have a possibility to list from watch cache, + // but we do that (for ResourceVersion != "0") only if Limit is unset. + // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly + // switch off pagination to force listing from watch cache (if enabled). + // With the existing semantic of RV (result is at least as fresh as provided RV), + // this is correct and doesn't lead to going back in time. + // + // We also don't turn off pagination for ResourceVersion="0", since watch cache + // is ignoring Limit in that case anyway, and if watch cache is not enabled + // we don't introduce regression. + pager.PageSize = 0 + } + + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options) + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + r.setIsLastSyncResourceVersionUnavailable(true) + // Retry immediately if the resource version used to list is unavailable. 
+ // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on + // continuation pages, but the pager might not be enabled, the full list might fail because the + // resource version it is listing at is expired or the cache may not yet be synced to the provided + // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure + // the reflector makes forward progress. + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + } + close(listCh) + }() + select { + case <-stopCh: + return nil + case r := <-panicCh: + panic(r) + case <-listCh: + } + initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) + if err != nil { + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) + } + + // We check if the list was paginated and if so set the paginatedResult based on that. + // However, we want to do that only for the initial list (which is the only case + // when we set ResourceVersion="0"). The reasoning behind it is that later, in some + // situations we may force listing directly from etcd (by setting ResourceVersion="") + // which will return paginated result, even if watch cache is enabled. However, in + // that case, we still want to prefer sending requests to watch cache if possible. + // + // Paginated result returned for request with ResourceVersion="0" mean that watch + // cache is disabled and there are a lot of objects of a given type. In such case, + // there is no need to prefer listing from watch cache. + if options.ResourceVersion == "0" && paginatedResult { + r.paginatedResult = true + } + + r.setIsLastSyncResourceVersionUnavailable(false) // list was successful + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v: %v", list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractListWithAlloc(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("unable to sync list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil +} + +// watchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a consistent stream with the server. +// That means the returned data is consistent, as if, served directly from etcd via a quorum read. +// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. +// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. 
+// +// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a stream with the server at the provided resource version. +// To establish the initial state the server begins with synthetic "Added" events. +// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { + var w watch.Interface + var err error + var temporaryStore Store + var resourceVersion string + // TODO(#115478): see if this function could be turned + // into a method and see if error handling + // could be unified with the r.watch method + isErrorRetriableWithSideEffectsFn := func(err error) bool { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) + <-r.backoffManager.Backoff().C() + return true + } + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + // we tried to re-establish a watch request but the provided RV + // has either expired or it is greater than the server knows about. + // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + bookmarkReceived := pointer.Bool(false) + err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + bookmarkReceived, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if err == errorStopRequested { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if *bookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. 
+ initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + // syncWith replaces the store's items with the given list. func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) @@ -456,13 +688,26 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err return r.store.Replace(found, resourceVersion) } -// watchHandler watches w and keeps *resourceVersion up to date. -func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { +// watchHandler watches w and sets setLastSyncResourceVersion +func watchHandler(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnInitialEventsEndBookmark *bool, + clock clock.Clock, + errc chan error, + stopCh <-chan struct{}, +) error { eventCount := 0 - - // Stopping the watcher should be idempotent and if we return from this function there's no way - // we're coming back in with the same watch interface. - defer w.Stop() + if exitOnInitialEventsEndBookmark != nil { + // set it to false just in case somebody + // made it positive + *exitOnInitialEventsEndBookmark = false + } loop: for { @@ -478,62 +723,71 @@ loop: if event.Type == watch.Error { return apierrors.FromObject(event.Object) } - if r.expectedType != nil { - if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + if expectedType != nil { + if e, a := expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a)) continue } } - if r.expectedGVK != nil { - if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a)) + if expectedGVK != nil { + if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a)) continue } } meta, err := meta.Accessor(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) continue } - newResourceVersion := meta.GetResourceVersion() + resourceVersion := meta.GetResourceVersion() switch event.Type { case watch.Added: - err := r.store.Add(event.Object) + err := store.Add(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err)) } case watch.Modified: - err := r.store.Update(event.Object) + err := store.Update(event.Object) if err != nil { - 
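The UseWatchList field and the ENABLE_CLIENT_GO_WATCH_LIST_ALPHA variable shown above are the two opt-in knobs for this streaming path. A brief sketch, with the ListerWatcher left abstract:

package main

import (
	"os"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newStreamingPodReflector opts a single reflector into the alpha watch-list
// stream. If the server rejects the SendInitialEvents request as invalid,
// ListAndWatch logs a warning and falls back to the paginated LIST + WATCH path.
func newStreamingPodReflector(lw cache.ListerWatcher) *cache.Reflector {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{})
	r.UseWatchList = true
	return r
}

func main() {
	// The environment variable checked in NewReflectorWithOptions enables the
	// same behaviour for every reflector created afterwards in this process.
	os.Setenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA", "true")
}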
utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err)) } case watch.Deleted: // TODO: Will any consumers need access to the "last known // state", which is passed in event.Object? If so, may need // to change this. - err := r.store.Delete(event.Object) + err := store.Delete(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err)) } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion + if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if exitOnInitialEventsEndBookmark != nil { + *exitOnInitialEventsEndBookmark = true + } + } default: - utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) } - *resourceVersion = newResourceVersion - r.setLastSyncResourceVersion(newResourceVersion) - if rvu, ok := r.store.(ResourceVersionUpdater); ok { - rvu.UpdateResourceVersion(newResourceVersion) + setLastSyncResourceVersion(resourceVersion) + if rvu, ok := store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(resourceVersion) } eventCount++ + if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + watchDuration := clock.Since(start) + klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) + return nil + } } } - watchDuration := r.clock.Since(start) + watchDuration := clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) + return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) } - klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) return nil } @@ -574,6 +828,18 @@ func (r *Reflector) relistResourceVersion() string { return r.lastSyncResourceVersion } +// rewatchResourceVersion determines the resource version the reflector should start streaming from. +func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned // "expired" or "too large resource version" error. 
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { @@ -612,5 +878,25 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } return false } diff --git a/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go new file mode 100644 index 000000000..8201fb153 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "k8s.io/utils/clock" + "time" +) + +type RetryWithDeadline interface { + After(error) + ShouldRetry() bool +} + +type retryWithDeadlineImpl struct { + firstErrorTime time.Time + lastErrorTime time.Time + maxRetryDuration time.Duration + minResetPeriod time.Duration + isRetryable func(error) bool + clock clock.Clock +} + +func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline { + return &retryWithDeadlineImpl{ + firstErrorTime: time.Time{}, + lastErrorTime: time.Time{}, + maxRetryDuration: maxRetryDuration, + minResetPeriod: minResetPeriod, + isRetryable: isRetryable, + clock: clock, + } +} + +func (r *retryWithDeadlineImpl) reset() { + r.firstErrorTime = time.Time{} + r.lastErrorTime = time.Time{} +} + +func (r *retryWithDeadlineImpl) After(err error) { + if r.isRetryable(err) { + if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod { + r.reset() + } + + if r.firstErrorTime.IsZero() { + r.firstErrorTime = r.clock.Now() + } + r.lastErrorTime = r.clock.Now() + } +} + +func (r *retryWithDeadlineImpl) ShouldRetry() bool { + if r.maxRetryDuration <= time.Duration(0) { + return false + } + + if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration { + return true + } + + r.reset() + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 9f42782d1..be8694ddb 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" @@ -132,10 +133,14 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - AddEventHandler(handler ResourceEventHandler) + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). + AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means // this handler does not care about resyncs. The resync operation @@ -150,7 +155,13 @@ type SharedInformer interface { // between any two resyncs may be longer than the nominal period // because the implementation takes time to do work and there may // be competing load and scheduling noise. - AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. 
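The retry helper above is what the reflector's watch loop drives through MaxInternalErrorRetryDuration; it can also be used directly. A short sketch with an arbitrary two-minute deadline and a synthetic internal error:

package main

import (
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"
	"k8s.io/utils/clock"
)

func main() {
	// Retry internal server errors for up to 2 minutes; a minute without a
	// retriable error resets the window.
	retry := cache.NewRetryWithDeadline(2*time.Minute, time.Minute, apierrors.IsInternalError, clock.RealClock{})

	err := apierrors.NewInternalError(fmt.Errorf("etcd leader changed"))
	retry.After(err)
	fmt.Println("retry?", retry.ShouldRetry()) // true while inside the 2 minute window
}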
+ AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) + // RemoveEventHandler removes a formerly added event handler given by + // its registration handle. + // This function is guaranteed to be idempotent, and thread-safe. + RemoveEventHandler(handle ResourceEventHandlerRegistration) error // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController is deprecated, it does nothing useful @@ -161,6 +172,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -190,11 +205,25 @@ type SharedInformer interface { // // Must be set before starting the informer. // - // Note: Since the object given to the handler may be already shared with - // other goroutines, it is advisable to copy the object being - // transform before mutating it at all and returning the copy to prevent - // data races. + // Please see the comment on TransformFunc for more details. SetTransform(handler TransformFunc) error + + // IsStopped reports whether the informer has already been stopped. + // Adding event handlers to already stopped informers is not possible. + // An informer already stopped will never be started again. + IsStopped() bool +} + +// Opaque interface representing the registration of ResourceEventHandler for +// a SharedInformer. Must be supplied back to the same SharedInformer's +// `RemoveEventHandler` to unregister the handlers. +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool } // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. @@ -205,14 +234,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. 
+func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -220,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. + ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. @@ -308,12 +364,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. 
If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration @@ -363,7 +420,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -401,27 +459,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") return } - fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: s.indexer, - EmitDeltaTypeReplaced: true, - }) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, - - Process: s.HandleDeltas, - WatchErrorHandler: s.watchErrorHandler, - } func() { s.startedLock.Lock() defer s.startedLock.Unlock() + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + Transformer: s.transform, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + s.controller = New(cfg) s.controller.(*controller).clock = s.clock s.started = true @@ -492,8 +553,8 @@ func (s *sharedIndexInformer) GetController() Controller { return &dummyController{informer: s} } -func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { - s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) { + return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) } func determineResyncPeriod(desired, check time.Duration) time.Duration { @@ -513,13 +574,12 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { const minimumResyncPeriod = 1 * time.Second -func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) { s.startedLock.Lock() defer s.startedLock.Unlock() if s.stopped { - klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) - return + return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler) } if resyncPeriod > 0 { @@ -542,11 +602,10 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { 
- s.processor.addListener(listener) - return + return s.processor.addListener(listener), nil } // in order to safely join, we have to @@ -557,28 +616,37 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv s.blockDeltas.Lock() defer s.blockDeltas.Unlock() - s.processor.addListener(listener) + handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). + listener.add(addNotification{newObj: item, isInInitialList: true}) } + return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { - return processDeltas(s, s.indexer, s.transform, deltas) + return processDeltas(s, s.indexer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler -func (s *sharedIndexInformer) OnAdd(obj interface{}) { +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) - s.processor.distribute(addNotification{newObj: obj}, false) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler @@ -610,6 +678,26 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) { s.processor.distribute(deleteNotification{oldObj: old}, false) } +// IsStopped reports whether the informer has already been stopped +func (s *sharedIndexInformer) IsStopped() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.stopped +} + +func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + // in order to safely remove, we have to + // 1. stop sending add/update/delete notifications + // 2. remove and stop listener + // 3. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + return s.processor.removeListener(handle) +} + // sharedProcessor has a collection of processorListener and can // distribute a notification object to its listeners. There are two // kinds of distribute operations. 
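Because a failed registration is now an error instead of a V(2) log line, late registration needs explicit handling. A hedged sketch combining IsStopped with AddEventHandlerWithResyncPeriod; addWatcher and onUpdate are illustrative names:

package example

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

// addWatcher registers a handler with its own resync period and returns the
// handle so the caller can unregister it later via RemoveEventHandler.
func addWatcher(informer cache.SharedIndexInformer, onUpdate func(oldObj, newObj interface{})) (cache.ResourceEventHandlerRegistration, error) {
	if informer.IsStopped() {
		// Adding to a stopped informer now fails instead of being silently dropped.
		return nil, fmt.Errorf("informer already stopped")
	}
	return informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
		UpdateFunc: onUpdate, // resyncs are delivered here as synthetic updates
	}, 10*time.Minute)
}

IsStopped is only advisory here; the add path above re-checks s.stopped under startedLock, so the returned error remains the authoritative signal.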
The sync distributions go to a @@ -619,39 +707,85 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) { type sharedProcessor struct { listenersStarted bool listenersLock sync.RWMutex - listeners []*processorListener - syncingListeners []*processorListener - clock clock.Clock - wg wait.Group + // Map from listeners to whether or not they are currently syncing + listeners map[*processorListener]bool + clock clock.Clock + wg wait.Group +} + +func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if p.listeners == nil { + return nil + } + + if result, ok := registration.(*processorListener); ok { + if _, exists := p.listeners[result]; exists { + return result + } + } + + return nil } -func (p *sharedProcessor) addListener(listener *processorListener) { +func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration { p.listenersLock.Lock() defer p.listenersLock.Unlock() - p.addListenerLocked(listener) + if p.listeners == nil { + p.listeners = make(map[*processorListener]bool) + } + + p.listeners[listener] = true + if p.listenersStarted { p.wg.Start(listener.run) p.wg.Start(listener.pop) } + + return listener } -func (p *sharedProcessor) addListenerLocked(listener *processorListener) { - p.listeners = append(p.listeners, listener) - p.syncingListeners = append(p.syncingListeners, listener) +func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + listener, ok := handle.(*processorListener) + if !ok { + return fmt.Errorf("invalid key type %t", handle) + } else if p.listeners == nil { + // No listeners are registered, do nothing + return nil + } else if _, exists := p.listeners[listener]; !exists { + // Listener is not registered, just do nothing + return nil + } + + delete(p.listeners, listener) + + if p.listenersStarted { + close(listener.addCh) + } + + return nil } func (p *sharedProcessor) distribute(obj interface{}, sync bool) { p.listenersLock.RLock() defer p.listenersLock.RUnlock() - if sync { - for _, listener := range p.syncingListeners { + for listener, isSyncing := range p.listeners { + switch { + case !sync: + // non-sync messages are delivered to every listener listener.add(obj) - } - } else { - for _, listener := range p.listeners { + case isSyncing: + // sync messages are delivered to every syncing listener listener.add(obj) + default: + // skipping a sync obj for a non-syncing listener } } } @@ -660,18 +794,27 @@ func (p *sharedProcessor) run(stopCh <-chan struct{}) { func() { p.listenersLock.RLock() defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { + for listener := range p.listeners { p.wg.Start(listener.run) p.wg.Start(listener.pop) } p.listenersStarted = true }() <-stopCh - p.listenersLock.RLock() - defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { + + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + for listener := range p.listeners { close(listener.addCh) // Tell .pop() to stop. 
.pop() will tell .run() to stop } + + // Wipe out list of listeners since they are now closed + // (processorListener cannot be re-used) + p.listeners = nil + + // Reset to false since no listeners are running + p.listenersStarted = false + p.wg.Wait() // Wait for all .pop() and .run() to stop } @@ -681,16 +824,16 @@ func (p *sharedProcessor) shouldResync() bool { p.listenersLock.Lock() defer p.listenersLock.Unlock() - p.syncingListeners = []*processorListener{} - resyncNeeded := false now := p.clock.Now() - for _, listener := range p.listeners { + for listener := range p.listeners { // need to loop through all the listeners to see if they need to resync so we can prepare any // listeners that are going to be resyncing. - if listener.shouldResync(now) { + shouldResync := listener.shouldResync(now) + p.listeners[listener] = shouldResync + + if shouldResync { resyncNeeded = true - p.syncingListeners = append(p.syncingListeners, listener) listener.determineNextResync(now) } } @@ -701,8 +844,9 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati p.listenersLock.RLock() defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { - resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + for listener := range p.listeners { + resyncPeriod := determineResyncPeriod( + listener.requestedResyncPeriod, resyncCheckPeriod) listener.setResyncPeriod(resyncPeriod) } } @@ -724,6 +868,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. @@ -754,11 +900,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. 
+func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -770,6 +923,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -815,7 +971,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index 24ffabab7..5cc3f42ec 100644 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Store is a generic object storage and processing interface. A @@ -99,20 +100,38 @@ type ExplicitKey string // The key uses the format / unless is empty, then // it's just . // -// TODO: replace key-as-string with a key-as-struct so that this -// packing/unpacking won't be necessary. +// Clients that want a structured alternative can use ObjectToName or MetaObjectToName. +// Note: this would not be a client that wants a key for a Store because those are +// necessarily strings. +// +// TODO maybe some day?: change Store to be keyed differently func MetaNamespaceKeyFunc(obj interface{}) (string, error) { if key, ok := obj.(ExplicitKey); ok { return string(key), nil } + objName, err := ObjectToName(obj) + if err != nil { + return "", err + } + return objName.String(), nil +} + +// ObjectToName returns the structured name for the given object, +// if indeed it can be viewed as a metav1.Object. 
+func ObjectToName(obj interface{}) (ObjectName, error) { meta, err := meta.Accessor(obj) if err != nil { - return "", fmt.Errorf("object has no meta: %v", err) + return ObjectName{}, fmt.Errorf("object has no meta: %v", err) } - if len(meta.GetNamespace()) > 0 { - return meta.GetNamespace() + "/" + meta.GetName(), nil + return MetaObjectToName(meta), nil +} + +// MetaObjectToName returns the structured name for the given object +func MetaObjectToName(obj metav1.Object) ObjectName { + if len(obj.GetNamespace()) > 0 { + return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()} } - return meta.GetName(), nil + return ObjectName{Namespace: "", Name: obj.GetName()} } // SplitMetaNamespaceKey returns the namespace and name that @@ -199,8 +218,11 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) return c.cacheStorage.Index(indexName, obj) } -func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) { - return c.cacheStorage.IndexKeys(indexName, indexKey) +// IndexKeys returns the storage keys of the stored objects whose set of +// indexed values for the named index includes the given indexed value. +// The returned keys are suitable to pass to GetByKey(). +func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexedValue) } // ListIndexFuncValues returns the list of generated values of an Index func @@ -208,8 +230,10 @@ func (c *cache) ListIndexFuncValues(indexName string) []string { return c.cacheStorage.ListIndexFuncValues(indexName) } -func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) { - return c.cacheStorage.ByIndex(indexName, indexKey) +// ByIndex returns the stored objects whose set of indexed values +// for the named index includes the given indexed value. +func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + return c.cacheStorage.ByIndex(indexName, indexedValue) } func (c *cache) AddIndexers(newIndexers Indexers) error { diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 000000000..ce51da9af --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. 
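ObjectToName and MetaObjectToName give a structured alternative to the slash-delimited string keys. A small sketch of both paths next to the classic key function; the Pod literal is only for illustration:

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func keyExamples() error {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "web-0"}}

	// Classic string key, still "<namespace>/<name>".
	key, err := cache.MetaNamespaceKeyFunc(pod)
	if err != nil {
		return err
	}

	// Structured alternative; String() renders the same "<namespace>/<name>" form.
	name, err := cache.ObjectToName(pod)
	if err != nil {
		return err
	}
	fmt.Println(key, name.Namespace, name.Name, name.String())

	// MetaObjectToName skips the meta.Accessor step when a metav1.Object is already at hand.
	fmt.Println(cache.MetaObjectToName(pod).String())
	return nil
}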
+type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 000000000..3fa2beb6b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. +type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. 
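Lazy caches the last successful Evaluate result until Notify marks it stale. A sketch of the semantics, assuming the synctrack package is imported directly (it is primarily consumed by client-go itself); os.Hostname is just a convenient stand-in for an expensive computation:

package example

import (
	"fmt"
	"os"

	"k8s.io/client-go/tools/cache/synctrack"
)

// A Lazy caches the last successful Evaluate result until Notify invalidates it.
var hostname = synctrack.Lazy[string]{
	Evaluate: os.Hostname,
}

func lazyExample() {
	v, err := hostname.Get() // first Get runs Evaluate
	fmt.Println(v, err)

	hostname.Notify()       // mark the cached value stale
	v, err = hostname.Get() // re-evaluates; concurrent Gets share the result
	fmt.Println(v, err)
}

Concurrent Gets after a Notify wait on a single evaluation; error results are not cached, so a failing Evaluate is retried on the next Get.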
+func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. + if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 6d58c0d69..145e93ee5 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -47,9 +47,9 @@ type ThreadSafeStore interface { ListKeys() []string Replace(map[string]interface{}, string) Index(indexName string, obj interface{}) ([]interface{}, error) - IndexKeys(indexName, indexKey string) ([]string, error) + IndexKeys(indexName, indexedValue string) ([]string, error) ListIndexFuncValues(name string) []string - ByIndex(indexName, indexKey string) ([]interface{}, error) + ByIndex(indexName, indexedValue string) ([]interface{}, error) GetIndexers() Indexers // AddIndexers adds more indexers to this store. 
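SingleFileTracker is the piece the shared informer uses to answer per-handler HasSynced: Start is called when an initial-list item is queued, Finished when it has been processed, and HasSynced consults the upstream source first. A sketch of the same pattern applied to a single-worker queue outside the informer; the queueTracker type and its fields are illustrative:

package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/cache/synctrack"
)

type workItem struct {
	obj             interface{}
	isInInitialList bool
}

// queueTracker mirrors what the shared informer now does internally: HasSynced
// flips to true only once the upstream source is synced and every item from
// the initial list has been processed, in order, by a single worker.
type queueTracker struct {
	tracker synctrack.SingleFileTracker
	work    chan workItem
}

func newQueueTracker(upstream cache.InformerSynced) *queueTracker {
	return &queueTracker{
		tracker: synctrack.SingleFileTracker{UpstreamHasSynced: upstream},
		work:    make(chan workItem, 64),
	}
}

// enqueue would be called from OnAdd, which now carries isInInitialList.
func (q *queueTracker) enqueue(obj interface{}, isInInitialList bool) {
	if isInInitialList {
		q.tracker.Start() // count one pending initial-list item
	}
	q.work <- workItem{obj: obj, isInInitialList: isInInitialList}
}

// run is the single worker draining the queue.
func (q *queueTracker) run(handle func(interface{})) {
	for item := range q.work {
		handle(item.obj)
		if item.isInInitialList {
			q.tracker.Finished() // pairs with the Start above; never call it unpaired
		}
	}
}

// HasSynced can be handed to cache.WaitForCacheSync.
func (q *queueTracker) HasSynced() bool { return q.tracker.HasSynced() }

AsyncTracker[T] covers the multi-worker case by tracking outstanding keys in a set instead of a single ordered counter.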
If you call this after you already have data @@ -59,15 +59,159 @@ type ThreadSafeStore interface { Resync() error } +// storeIndex implements the indexing functionality for Store interface +type storeIndex struct { + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (i *storeIndex) reset() { + i.indices = Indices{} +} + +func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := i.indices[indexName] + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + storeKeySet = index[indexedValues[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. + storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + return storeKeySet, nil +} + +func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := i.indices[indexName] + return index[indexedValue], nil +} + +func (i *storeIndex) getIndexValues(indexName string) []string { + index := i.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (i *storeIndex) addIndexers(newIndexers Indexers) error { + oldKeys := sets.StringKeySet(i.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) 
{ + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + i.indexers[k] = v + } + return nil +} + +// updateIndices modifies the objects location in the managed indexes: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { + var oldIndexValues, indexValues []string + var err error + for name, indexFunc := range i.indexers { + if oldObj != nil { + oldIndexValues, err = indexFunc(oldObj) + } else { + oldIndexValues = oldIndexValues[:0] + } + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + if newObj != nil { + indexValues, err = indexFunc(newObj) + } else { + indexValues = indexValues[:0] + } + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + index := i.indices[name] + if index == nil { + index = Index{} + i.indices[name] = index + } + + if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { + // We optimize for the most common case where indexFunc returns a single value which has not been changed + continue + } + + for _, value := range oldIndexValues { + i.deleteKeyFromIndex(key, value, index) + } + for _, value := range indexValues { + i.addKeyToIndex(key, value, index) + } + } +} + +func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) +} + +func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + return + } + set.Delete(key) + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. 
+ if len(set) == 0 { + delete(index, indexValue) + } +} + // threadSafeMap implements ThreadSafeStore type threadSafeMap struct { lock sync.RWMutex items map[string]interface{} - // indexers maps a name to an IndexFunc - indexers Indexers - // indices maps a name to an Index - indices Indices + // index implements the indexing functionality + index *storeIndex } func (c *threadSafeMap) Add(key string, obj interface{}) { @@ -79,14 +223,14 @@ func (c *threadSafeMap) Update(key string, obj interface{}) { defer c.lock.Unlock() oldObject := c.items[key] c.items[key] = obj - c.updateIndices(oldObject, obj, key) + c.index.updateIndices(oldObject, obj, key) } func (c *threadSafeMap) Delete(key string) { c.lock.Lock() defer c.lock.Unlock() if obj, exists := c.items[key]; exists { - c.updateIndices(obj, nil, key) + c.index.updateIndices(obj, nil, key) delete(c.items, key) } } @@ -126,9 +270,9 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st c.items = items // rebuild any index - c.indices = Indices{} + c.index.reset() for key, item := range c.items { - c.updateIndices(nil, item, key) + c.index.updateIndices(nil, item, key) } } @@ -138,32 +282,10 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) - } - - indexedValues, err := indexFunc(obj) + storeKeySet, err := c.index.getKeysFromIndex(indexName, obj) if err != nil { return nil, err } - index := c.indices[indexName] - - var storeKeySet sets.String - if len(indexedValues) == 1 { - // In majority of cases, there is exactly one value matching. - // Optimize the most common path - deduping is not needed here. - storeKeySet = index[indexedValues[0]] - } else { - // Need to de-dupe the return list. - // Since multiple keys are allowed, this can happen. 
- storeKeySet = sets.String{} - for _, indexedValue := range indexedValues { - for key := range index[indexedValue] { - storeKeySet.Insert(key) - } - } - } list := make([]interface{}, 0, storeKeySet.Len()) for storeKey := range storeKeySet { @@ -177,14 +299,10 @@ func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err } - - index := c.indices[indexName] - - set := index[indexedValue] list := make([]interface{}, 0, set.Len()) for key := range set { list = append(list, c.items[key]) @@ -199,14 +317,10 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err } - - index := c.indices[indexName] - - set := index[indexedValue] return set.List(), nil } @@ -214,16 +328,11 @@ func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { c.lock.RLock() defer c.lock.RUnlock() - index := c.indices[indexName] - names := make([]string, 0, len(index)) - for key := range index { - names = append(names, key) - } - return names + return c.index.getIndexValues(indexName) } func (c *threadSafeMap) GetIndexers() Indexers { - return c.indexers + return c.index.indexers } func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { @@ -234,87 +343,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { return fmt.Errorf("cannot add indexers to running index") } - oldKeys := sets.StringKeySet(c.indexers) - newKeys := sets.StringKeySet(newIndexers) - - if oldKeys.HasAny(newKeys.List()...) 
{ - return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) - } - - for k, v := range newIndexers { - c.indexers[k] = v - } - return nil -} - -// updateIndices modifies the objects location in the managed indexes: -// - for create you must provide only the newObj -// - for update you must provide both the oldObj and the newObj -// - for delete you must provide only the oldObj -// updateIndices must be called from a function that already has a lock on the cache -func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { - var oldIndexValues, indexValues []string - var err error - for name, indexFunc := range c.indexers { - if oldObj != nil { - oldIndexValues, err = indexFunc(oldObj) - } else { - oldIndexValues = oldIndexValues[:0] - } - if err != nil { - panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) - } - - if newObj != nil { - indexValues, err = indexFunc(newObj) - } else { - indexValues = indexValues[:0] - } - if err != nil { - panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) - } - - index := c.indices[name] - if index == nil { - index = Index{} - c.indices[name] = index - } - - if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { - // We optimize for the most common case where indexFunc returns a single value which has not been changed - continue - } - - for _, value := range oldIndexValues { - c.deleteKeyFromIndex(key, value, index) - } - for _, value := range indexValues { - c.addKeyToIndex(key, value, index) - } - } -} - -func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) { - set := index[indexValue] - if set == nil { - set = sets.String{} - index[indexValue] = set - } - set.Insert(key) -} - -func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) { - set := index[indexValue] - if set == nil { - return - } - set.Delete(key) - // If we don't delete the set when zero, indices with high cardinality - // short lived resources can cause memory to increase over time from - // unused empty sets. See `kubernetes/kubernetes/issues/84959`. - if len(set) == 0 { - delete(index, indexValue) - } + return c.index.addIndexers(newIndexers) } func (c *threadSafeMap) Resync() error { @@ -325,8 +354,10 @@ func (c *threadSafeMap) Resync() error { // NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ - items: map[string]interface{}{}, - indexers: indexers, - indices: indices, + items: map[string]interface{}{}, + index: &storeIndex{ + indexers: indexers, + indices: indices, + }, } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go index d677d6519..dd5f91806 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -20,10 +20,11 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" + "reflect" + "strings" ) func init() { @@ -82,21 +83,21 @@ func MinifyConfig(config *Config) error { } var ( - redactedBytes []byte dataOmittedBytes []byte + redactedBytes []byte ) -// Flatten redacts raw data entries from the config object for a human-readable view. +// ShortenConfig redacts raw data entries from the config object for a human-readable view. 
func ShortenConfig(config *Config) { - // trick json encoder into printing a human readable string in the raw data + // trick json encoder into printing a human-readable string in the raw data // by base64 decoding what we want to print. Relies on implementation of // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte for key, authInfo := range config.AuthInfos { if len(authInfo.ClientKeyData) > 0 { - authInfo.ClientKeyData = redactedBytes + authInfo.ClientKeyData = dataOmittedBytes } if len(authInfo.ClientCertificateData) > 0 { - authInfo.ClientCertificateData = redactedBytes + authInfo.ClientCertificateData = dataOmittedBytes } if len(authInfo.Token) > 0 { authInfo.Token = "REDACTED" @@ -111,7 +112,7 @@ func ShortenConfig(config *Config) { } } -// Flatten changes the config object into a self contained config (useful for making secrets) +// FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") @@ -152,7 +153,7 @@ func FlattenContent(path *string, contents *[]byte, baseDir string) error { var err error absPath := ResolvePath(*path, baseDir) - *contents, err = ioutil.ReadFile(absPath) + *contents, err = os.ReadFile(absPath) if err != nil { return err } @@ -189,3 +190,77 @@ func MakeAbs(path, base string) (string, error) { } return filepath.Join(base, path), nil } + +// RedactSecrets replaces any sensitive values with REDACTED +func RedactSecrets(config *Config) error { + return redactSecrets(reflect.ValueOf(config), false) +} + +func redactSecrets(curr reflect.Value, redact bool) error { + redactedBytes = []byte("REDACTED") + if !curr.IsValid() { + return nil + } + + actualCurrValue := curr + if curr.Kind() == reflect.Ptr { + actualCurrValue = curr.Elem() + } + + switch actualCurrValue.Kind() { + case reflect.Map: + for _, v := range actualCurrValue.MapKeys() { + err := redactSecrets(actualCurrValue.MapIndex(v), false) + if err != nil { + return err + } + } + return nil + + case reflect.String: + if redact { + if !actualCurrValue.IsZero() { + actualCurrValue.SetString("REDACTED") + } + } + return nil + + case reflect.Slice: + if actualCurrValue.Type() == reflect.TypeOf([]byte{}) && redact { + if !actualCurrValue.IsNil() { + actualCurrValue.SetBytes(redactedBytes) + } + return nil + } + for i := 0; i < actualCurrValue.Len(); i++ { + err := redactSecrets(actualCurrValue.Index(i), false) + if err != nil { + return err + } + } + return nil + + case reflect.Struct: + for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ { + currFieldValue := actualCurrValue.Field(fieldIndex) + currFieldType := actualCurrValue.Type().Field(fieldIndex) + currYamlTag := currFieldType.Tag.Get("datapolicy") + currFieldTypeYamlName := strings.Split(currYamlTag, ",")[0] + if currFieldTypeYamlName != "" { + err := redactSecrets(currFieldValue, true) + if err != nil { + return err + } + } else { + err := redactSecrets(currFieldValue, false) + if err != nil { + return err + } + } + } + return nil + + default: + return nil + } +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 44a2fb94b..ae8b8c703 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -67,7 +67,7 @@ type Preferences struct { type Cluster struct { // LocationOfOrigin 
indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. @@ -93,6 +93,11 @@ type Cluster struct { // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` @@ -102,7 +107,7 @@ type Cluster struct { type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // ClientCertificate is the path to a client cert file for TLS. // +optional ClientCertificate string `json:"client-certificate,omitempty"` @@ -154,7 +159,7 @@ type AuthInfo struct { type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context @@ -247,7 +252,7 @@ type ExecConfig struct { // recommended as one of the prime benefits of exec plugins is that no secrets need // to be stored directly in the kubeconfig. // +k8s:conversion-gen=false - Config runtime.Object + Config runtime.Object `json:"-"` // InteractiveMode determines this plugin's relationship with standard input. Valid // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this @@ -259,7 +264,7 @@ type ExecConfig struct { // client.authentication.k8s.io/v1beta1, then this field is optional and defaults // to "IfAvailable" when unset. Otherwise, this field is required. // +optional - InteractiveMode ExecInteractiveMode + InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` // StdinUnavailable indicates whether the exec authenticator can pass standard // input through to this exec plugin. For example, a higher level entity might be using @@ -267,14 +272,14 @@ type ExecConfig struct { // plugin to use standard input. This is kept here in order to keep all of the exec configuration // together, but it is never serialized. // +k8s:conversion-gen=false - StdinUnavailable bool + StdinUnavailable bool `json:"-"` // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator // cannot successfully run this exec plugin because it needs to use standard input and // StdinUnavailable is true. 
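RedactSecrets, added above in helpers.go, scrubs values whose fields carry a datapolicy struct tag, and DisableCompression is now part of the Cluster stanza. A sketch combining the two; the dumpConfig name and path argument are illustrative:

package example

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// dumpConfig loads a kubeconfig, scrubs sensitive material with the new
// RedactSecrets helper, and prints a summary per cluster.
func dumpConfig(path string) error {
	cfg, err := clientcmd.LoadFromFile(path)
	if err != nil {
		return err
	}

	// Replace client key data, tokens, passwords, etc. with "REDACTED"
	// (fields are matched via their `datapolicy` struct tags).
	if err := clientcmdapi.RedactSecrets(cfg); err != nil {
		return err
	}

	for name, cluster := range cfg.Clusters {
		// DisableCompression is new on Cluster and is propagated to rest.Config.
		fmt.Printf("cluster %s: server=%s disable-compression=%v\n",
			name, cluster.Server, cluster.DisableCompression)
	}
	return nil
}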
For example, a process that is already using standard input to // read user instructions might set this to "used by my-program to read user instructions". // +k8s:conversion-gen=false - StdinUnavailableMessage string + StdinUnavailableMessage string `json:"-"` } var _ fmt.Stringer = new(ExecConfig) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 757ed817b..5018a72b1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -86,6 +86,11 @@ type Cluster struct { // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index a13bae64d..bdedc1666 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -257,6 +257,7 @@ func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conv out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -276,6 +277,7 @@ func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conv out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go index 0e4127762..ce951e88d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "golang.org/x/term" @@ -51,15 +50,15 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { // Prompt for user/pass and write a file if none exists. 
if _, err := os.Stat(path); os.IsNotExist(err) { authPtr, err := a.Prompt() - auth := *authPtr if err != nil { return nil, err } + auth := *authPtr data, err := json.Marshal(auth) if err != nil { return &auth, err } - err = ioutil.WriteFile(path, data, 0600) + err = os.WriteFile(path, data, 0600) return &auth, err } authPtr, err := clientauth.LoadFromFile(path) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index cc37c9fbf..ae0f01f32 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -19,7 +19,6 @@ package clientcmd import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -165,6 +164,8 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { clientConfig.Proxy = http.ProxyURL(u) } + clientConfig.DisableCompression = configClusterInfo.DisableCompression + if config.overrides != nil && len(config.overrides.Timeout) > 0 { timeout, err := ParseTimeout(config.overrides.Timeout) if err != nil { @@ -246,7 +247,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI mergedConfig.BearerToken = configAuthInfo.Token mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile) if err != nil { return nil, err } @@ -586,7 +587,7 @@ func (config *inClusterClientConfig) Namespace() (string, bool, error) { } // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { return ns, false, nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 78bd9ed8d..b75737f1c 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -18,7 +18,6 @@ package clientcmd import ( "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -129,6 +128,28 @@ type ClientConfigLoadingRules struct { // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. // In case of missing files, it warns the user about the missing files. WarnIfAllMissing bool + + // Warner is the warning log callback to use in case of missing files. + Warner WarningHandler +} + +// WarningHandler allows to set the logging function to use +type WarningHandler func(error) + +func (handler WarningHandler) Warn(err error) { + if handler == nil { + klog.V(1).Info(err) + } else { + handler(err) + } +} + +type MissingConfigError struct { + Missing []string +} + +func (c MissingConfigError) Error() string { + return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", ")) } // ClientConfigLoadingRules implements the ClientConfigLoader interface. @@ -160,8 +181,10 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { // Load starts by running the MigrationRules and then // takes the loading rules and returns a Config object based on following rules. 
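The new Warner hook replaces the hard-coded klog warning for missing kubeconfig files, and the condition is surfaced as a typed MissingConfigError. A sketch of a caller-supplied handler; the verbosity levels chosen here are arbitrary:

package example

import (
	"errors"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
)

// newLoadingRules demotes the "Config not found" warning to a low-verbosity,
// caller-controlled log line instead of the default warning.
func newLoadingRules() *clientcmd.ClientConfigLoadingRules {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.Warner = func(err error) {
		var missing clientcmd.MissingConfigError
		if errors.As(err, &missing) {
			klog.V(2).Infof("ignoring missing kubeconfig files: %v", missing.Missing)
			return
		}
		klog.Warning(err)
	}
	return rules
}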
-// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice +// +// if the ExplicitPath, return the unmerged explicit file +// Otherwise, return a merged config based on the Precedence slice +// // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. // Read errors or files with non-deserializable content produce errors. // The first file to set a particular map key wins and map key's value is never changed. @@ -218,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { - klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + rules.Warner.Warn(MissingConfigError{Missing: missingList}) } // first merge all of our maps @@ -281,12 +304,12 @@ func (rules *ClientConfigLoadingRules) Migrate() error { return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) } - data, err := ioutil.ReadFile(source) + data, err := os.ReadFile(source) if err != nil { return err } // destination is created with mode 0666 before umask - err = ioutil.WriteFile(destination, data, 0666) + err = os.WriteFile(destination, data, 0666) if err != nil { return err } @@ -361,7 +384,7 @@ func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) + kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -427,7 +450,7 @@ func WriteToFile(config clientcmdapi.Config, filename string) error { } } - if err := ioutil.WriteFile(filename, content, 0600); err != nil { + if err := os.WriteFile(filename, content, 0600); err != nil { return err } return nil diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go index 4c290db55..483e51532 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -74,6 +74,7 @@ type ClusterOverrideFlags struct { InsecureSkipTLSVerify FlagInfo TLSServerName FlagInfo ProxyURL FlagInfo + DisableCompression FlagInfo } // FlagInfo contains information about how to register a flag. 
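The ClusterOverrideFlags addition above feeds into RecommendedClusterOverrideFlags and BindClusterFlags in the next hunk, so programs that bind the recommended override flags should pick up --disable-compression without code changes. A sketch using the existing top-level helpers; bindKubeconfigFlags is an illustrative wrapper:

package example

import (
	"github.com/spf13/pflag"
	"k8s.io/client-go/tools/clientcmd"
)

// bindKubeconfigFlags wires the recommended kubeconfig override flags onto a
// flag set; with this client-go version that set includes
// --disable-compression alongside --server, --context, and friends.
func bindKubeconfigFlags(flags *pflag.FlagSet) *clientcmd.ConfigOverrides {
	overrides := &clientcmd.ConfigOverrides{}
	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
	return overrides
}

Typical use would be bindKubeconfigFlags(pflag.CommandLine) followed by pflag.Parse() and a DirectClientConfig built from the resulting overrides.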
This struct is useful if you want to provide a way for an extender to @@ -143,25 +144,26 @@ func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { } const ( - FlagClusterName = "cluster" - FlagAuthInfoName = "user" - FlagContext = "context" - FlagNamespace = "namespace" - FlagAPIServer = "server" - FlagTLSServerName = "tls-server-name" - FlagInsecure = "insecure-skip-tls-verify" - FlagCertFile = "client-certificate" - FlagKeyFile = "client-key" - FlagCAFile = "certificate-authority" - FlagEmbedCerts = "embed-certs" - FlagBearerToken = "token" - FlagImpersonate = "as" - FlagImpersonateUID = "as-uid" - FlagImpersonateGroup = "as-group" - FlagUsername = "username" - FlagPassword = "password" - FlagTimeout = "request-timeout" - FlagProxyURL = "proxy-url" + FlagClusterName = "cluster" + FlagAuthInfoName = "user" + FlagContext = "context" + FlagNamespace = "namespace" + FlagAPIServer = "server" + FlagTLSServerName = "tls-server-name" + FlagInsecure = "insecure-skip-tls-verify" + FlagCertFile = "client-certificate" + FlagKeyFile = "client-key" + FlagCAFile = "certificate-authority" + FlagEmbedCerts = "embed-certs" + FlagBearerToken = "token" + FlagImpersonate = "as" + FlagImpersonateUID = "as-uid" + FlagImpersonateGroup = "as-group" + FlagUsername = "username" + FlagPassword = "password" + FlagTimeout = "request-timeout" + FlagProxyURL = "proxy-url" + FlagDisableCompression = "disable-compression" ) // RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing @@ -198,6 +200,7 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"}, TLSServerName: FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."}, ProxyURL: FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"}, + DisableCompression: FlagInfo{prefix + FlagDisableCompression, "", "", "If true, opt-out of response compression for all requests to the server"}, } } @@ -238,6 +241,7 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName) flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL) + flagNames.DisableCompression.BindBoolFlag(flags, &clusterInfo.DisableCompression) } // BindFlags is a convenience method to bind the specified flags to their associated variables diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 2ae1eb706..088972ef6 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -204,8 +204,19 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { if exists { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) 
+ + // Default to empty users and clusters and let the validation function report an error. + authInfo := config.AuthInfos[context.AuthInfo] + if authInfo == nil { + authInfo = &clientcmdapi.AuthInfo{} + } + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...) + + cluster := config.Clusters[context.Cluster] + if cluster == nil { + cluster = &clientcmdapi.Cluster{} + } + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...) } return newErrConfigurationInvalid(validationErrors) diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go index 6c684c7fa..99d3d8e23 100644 --- a/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -42,6 +42,10 @@ type LatencyMetric interface { Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) } +type ResolverLatencyMetric interface { + Observe(ctx context.Context, host string, latency time.Duration) +} + // SizeMetric observes client response size partitioned by verb and host. type SizeMetric interface { Observe(ctx context.Context, verb string, host string, size float64) @@ -58,6 +62,23 @@ type CallsMetric interface { Increment(exitCode int, callStatus string) } +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. +type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + +// TransportCacheMetric shows the number of entries in the internal transport cache +type TransportCacheMetric interface { + Observe(value int) +} + +// TransportCreateCallsMetric counts the number of times a transport is created +// partitioned by the result of the cache: hit, miss, uncacheable +type TransportCreateCallsMetric interface { + Increment(result string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -65,6 +86,8 @@ var ( ClientCertRotationAge DurationMetric = noopDuration{} // RequestLatency is the latency metric that rest clients will update. RequestLatency LatencyMetric = noopLatency{} + // ResolverLatency is the latency metric that DNS resolver will update + ResolverLatency ResolverLatencyMetric = noopResolverLatency{} // RequestSize is the request size metric that rest clients will update. RequestSize SizeMetric = noopSize{} // ResponseSize is the response size metric that rest clients will update. @@ -76,6 +99,15 @@ var ( // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by // exit code and call status. ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} + // TransportCacheEntries is the metric that tracks the number of entries in the + // internal transport cache. + TransportCacheEntries TransportCacheMetric = noopTransportCache{} + // TransportCreateCalls is the metric that counts the number of times a new transport + // is created + TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. 
@@ -83,11 +115,15 @@ type RegisterOpts struct { ClientCertExpiry ExpiryMetric ClientCertRotationAge DurationMetric RequestLatency LatencyMetric + ResolverLatency ResolverLatencyMetric RequestSize SizeMetric ResponseSize SizeMetric RateLimiterLatency LatencyMetric RequestResult ResultMetric ExecPluginCalls CallsMetric + RequestRetry RetryMetric + TransportCacheEntries TransportCacheMetric + TransportCreateCalls TransportCreateCallsMetric } // Register registers metrics for the rest client to use. This can @@ -103,6 +139,9 @@ func Register(opts RegisterOpts) { if opts.RequestLatency != nil { RequestLatency = opts.RequestLatency } + if opts.ResolverLatency != nil { + ResolverLatency = opts.ResolverLatency + } if opts.RequestSize != nil { RequestSize = opts.RequestSize } @@ -118,6 +157,15 @@ func Register(opts RegisterOpts) { if opts.ExecPluginCalls != nil { ExecPluginCalls = opts.ExecPluginCalls } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } + if opts.TransportCacheEntries != nil { + TransportCacheEntries = opts.TransportCacheEntries + } + if opts.TransportCreateCalls != nil { + TransportCreateCalls = opts.TransportCreateCalls + } }) } @@ -133,6 +181,11 @@ type noopLatency struct{} func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {} +type noopResolverLatency struct{} + +func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) { +} + type noopSize struct{} func (noopSize) Observe(context.Context, string, string, float64) {} @@ -144,3 +197,15 @@ func (noopResult) Increment(context.Context, string, string, string) {} type noopCalls struct{} func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} + +type noopTransportCache struct{} + +func (noopTransportCache) Observe(int) {} + +type noopTransportCreateCalls struct{} + +func (noopTransportCreateCalls) Increment(string) {} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index 805859e09..3c77cc37f 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager { // List returns a single list object, but attempts to retrieve smaller chunks from the // server to reduce the impact on the server. If the chunk attempt fails, it will load // the full list instead. The Limit field on options, if unset, will default to the page size. +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use ListWithAlloc instead. func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, false) +} + +// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency. 
+func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, true) +} + +func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) { if options.Limit == 0 { options.Limit = p.PageSize } @@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti list.ResourceVersion = m.GetResourceVersion() list.SelfLink = m.GetSelfLink() } - if err := meta.EachListItem(obj, func(obj runtime.Object) error { + eachListItemFunc := meta.EachListItem + if allocNew { + eachListItemFunc = meta.EachListItemWithAlloc + } + if err := eachListItemFunc(obj, func(obj runtime.Object) error { list.Items = append(list.Items, obj) return nil }); err != nil { @@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // // Items are retrieved in chunks from the server to reduce the impact on the server with up to // ListPager.PageBufferSize chunks buffered concurrently in the background. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use EachListItemWithAlloc instead. func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { return meta.EachListItem(obj, fn) }) } +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItemWithAlloc(obj, fn) + }) +} + // eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on // each list chunk. If fn returns an error, processing stops and that error is returned. If fn does // not return an error, any error encountered while retrieving the list from the server is @@ -203,6 +237,11 @@ func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.Li }() for o := range chunkC { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } err := fn(o) if err != nil { return err // any fn error should be returned immediately diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go index 442a991cc..5d4ec3743 100644 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -34,7 +34,7 @@ var ( // GetReference returns an ObjectReference which refers to the given // object, or an error if the object doesn't follow the conventions // that would allow this. 
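To make the List/ListWithAlloc distinction above concrete, a small sketch of consuming a paged list with the new allocating variant; the clientset argument and page size are assumptions, and `SimplePageFunc` is the existing pager helper for wrapping a plain List call.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func listPodNames(ctx context.Context, client kubernetes.Interface) error {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	}))
	p.PageSize = 500

	// EachListItemWithAlloc shallow-copies each item, so the underlying page
	// slice can be garbage-collected even if fn retains only a few items.
	return p.EachListItemWithAlloc(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
		pod, ok := obj.(*corev1.Pod)
		if !ok {
			return fmt.Errorf("unexpected object type %T", obj)
		}
		fmt.Println(pod.Name)
		return nil
	})
}
```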
-// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +// TODO: should take a meta.Interface see https://issue.k8s.io/7127 func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { if obj == nil { return nil, ErrNilObject diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go new file mode 100644 index 000000000..ac06a9cd3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remotecommand adds support for executing commands in containers, +// with support for separate stdin, stdout, and stderr streams, as well as +// TTY. +package remotecommand // import "k8s.io/client-go/tools/remotecommand" diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go new file mode 100644 index 000000000..e60dd7cdc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// errorStreamDecoder interprets the data on the error channel and creates a go error object from it. +type errorStreamDecoder interface { + decode(message []byte) error +} + +// watchErrorStream watches the errorStream for remote command error data, +// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote +// command exited successfully) to the returned error channel, and closes it. +// This function returns immediately. +func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error { + errorChan := make(chan error) + + go func() { + defer runtime.HandleCrash() + + message, err := io.ReadAll(errorStream) + switch { + case err != nil && err != io.EOF: + errorChan <- fmt.Errorf("error reading from error stream: %s", err) + case len(message) > 0: + errorChan <- d.decode(message) + default: + errorChan <- nil + } + close(errorChan) + }() + + return errorChan +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go new file mode 100644 index 000000000..d1f1be34c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "io" +) + +// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented, +// to keep io.Copy from doing things we don't want when copying from the reader to the data stream. +// +// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]), +// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call. +// That results in an oversized call to spdystream.Stream#Write [3], +// which results in a single oversized data frame[4] that is too large. +// +// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo +// [2] https://golang.org/pkg/io/#Copy +// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73 +// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304 +type readerWrapper struct { + reader io.Reader +} + +func (r readerWrapper) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go new file mode 100644 index 000000000..662a3cb4a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -0,0 +1,182 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" +) + +// StreamOptions holds information pertaining to the current streaming session: +// input/output streams, if the client is requesting a TTY, and a terminal size queue to +// support terminal resizing. +type StreamOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Tty bool + TerminalSizeQueue TerminalSizeQueue +} + +// Executor is an interface for transporting shell-style streams. +type Executor interface { + // Deprecated: use StreamWithContext instead to avoid possible resource leaks. + // See https://github.com/kubernetes/kubernetes/pull/103177 for details. + Stream(options StreamOptions) error + + // StreamWithContext initiates the transport of the standard shell streams. 
It will + // transport any non-nil stream to a remote system, and return an error if a problem + // occurs. If tty is set, the stderr stream is not used (raw TTY manages stdout and + // stderr over the stdout stream). + // The context controls the entire lifetime of stream execution. + StreamWithContext(ctx context.Context, options StreamOptions) error +} + +type streamCreator interface { + CreateStream(headers http.Header) (httpstream.Stream, error) +} + +type streamProtocolHandler interface { + stream(conn streamCreator) error +} + +// streamExecutor handles transporting standard shell streams over an httpstream connection. +type streamExecutor struct { + upgrader spdy.Upgrader + transport http.RoundTripper + + method string + url *url.URL + protocols []string +} + +// NewSPDYExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. +func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &streamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. +func (e *streamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. +func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { + req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &http.Client{Transport: e.transport}, + req, + e.protocols..., + ) + if err != nil { + return nil, nil, err + } + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. 
Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return conn, streamer, nil +} + +// StreamWithContext opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects or the context is done. +func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + conn, streamer, err := e.newConnectionAndStream(ctx, options) + if err != nil { + return err + } + defer conn.Close() + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + errorChan <- streamer.stream(conn) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go new file mode 100644 index 000000000..c838f21ba --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/resize.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +// TerminalSize and TerminalSizeQueue was a part of k8s.io/kubernetes/pkg/util/term +// and were moved in order to decouple client from other term dependencies + +// TerminalSize represents the width and height of a terminal. +type TerminalSize struct { + Width uint16 + Height uint16 +} + +// TerminalSizeQueue is capable of returning terminal resize events as they occur. +type TerminalSizeQueue interface { + // Next returns the new terminal size after the terminal has been resized. It returns nil when + // monitoring has been stopped. + Next() *TerminalSize +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go new file mode 100644 index 000000000..efa9a6c99 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -0,0 +1,159 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog/v2" +) + +// streamProtocolV1 implements the first version of the streaming exec & attach +// protocol. This version has some bugs, such as not being able to detect when +// non-interactive stdin data has ended. 
See https://issues.k8s.io/13394 and +// https://issues.k8s.io/13395 for more details. +type streamProtocolV1 struct { + StreamOptions + + errorStream httpstream.Stream + remoteStdin httpstream.Stream + remoteStdout httpstream.Stream + remoteStderr httpstream.Stream +} + +var _ streamProtocolHandler = &streamProtocolV1{} + +func newStreamProtocolV1(options StreamOptions) streamProtocolHandler { + return &streamProtocolV1{ + StreamOptions: options, + } +} + +func (p *streamProtocolV1) stream(conn streamCreator) error { + doneChan := make(chan struct{}, 2) + errorChan := make(chan error) + + cp := func(s string, dst io.Writer, src io.Reader) { + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + klog.Errorf("Error copying %s: %v", s, err) + } + if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { + doneChan <- struct{}{} + } + } + + // set up all the streams first + var err error + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.errorStream.Reset() + + // Create all the streams first, then start the copy goroutines. The server doesn't start its copy + // goroutines until it's received all of the streams. If the client creates the stdin stream and + // immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the + // spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't + // getting processed because the server hasn't started its copying, and it won't do that until it + // gets all the streams. By creating all the streams first, we ensure that the server is ready to + // process data before the client starts sending any. See https://issues.k8s.io/16373 for more info. + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdin.Reset() + } + + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdout.Reset() + } + + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStderr.Reset() + } + + // now that all the streams have been created, proceed with reading & copying + + // always read from errorStream + go func() { + message, err := io.ReadAll(p.errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream: %s", err) + return + } + if len(message) > 0 { + errorChan <- fmt.Errorf("Error executing remote command: %s", message) + return + } + }() + + if p.Stdin != nil { + // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) + // because stdin is not closed until the process exits. If we try to call + // stdin.Close(), it returns no error but doesn't unblock the copy. It will + // exit when the process exits, instead. 
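For context on how the executor added in this vendored package is consumed, a hedged sketch of the usual exec flow: build the pod exec URL, create a SPDY executor, then stream with a cancellable context. The URL construction and function name are illustrative; `NewSPDYExecutor`, `StreamOptions` and `StreamWithContext` are the APIs shown in remotecommand.go above.

```go
package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func runInPod(ctx context.Context, cfg *restclient.Config, client kubernetes.Interface, ns, pod string) error {
	req := client.CoreV1().RESTClient().Post().
		Resource("pods").Namespace(ns).Name(pod).SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command: []string{"ls", "/"},
			Stdout:  true,
			Stderr:  true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return err
	}
	// StreamWithContext is preferred over the deprecated Stream: cancelling
	// ctx tears the streams down instead of leaking them.
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}
```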
+ go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin}) + } + + waitCount := 0 + completedStreams := 0 + + if p.Stdout != nil { + waitCount++ + go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout) + } + + if p.Stderr != nil && !p.Tty { + waitCount++ + go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr) + } + +Loop: + for { + select { + case <-doneChan: + completedStreams++ + if completedStreams == waitCount { + break Loop + } + case err := <-errorChan: + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go new file mode 100644 index 000000000..d54612f4c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go @@ -0,0 +1,199 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// streamProtocolV2 implements version 2 of the streaming protocol for attach +// and exec. The original streaming protocol was metav1. As a result, this +// version is referred to as version 2, even though it is the first actual +// numbered version. +type streamProtocolV2 struct { + StreamOptions + + errorStream io.Reader + remoteStdin io.ReadWriteCloser + remoteStdout io.Reader + remoteStderr io.Reader +} + +var _ streamProtocolHandler = &streamProtocolV2{} + +func newStreamProtocolV2(options StreamOptions) streamProtocolHandler { + return &streamProtocolV2{ + StreamOptions: options, + } +} + +func (p *streamProtocolV2) createStreams(conn streamCreator) error { + var err error + headers := http.Header{} + + // set up error stream + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + + // set up stdin stream + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stdout stream + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stderr stream + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + return nil +} + +func (p *streamProtocolV2) copyStdin() { + if p.Stdin != nil { + var once sync.Once + + // copy from client's stdin to container's stdin + go func() { + defer runtime.HandleCrash() + + // if p.stdin is noninteractive, p.g. `echo abc | kubectl exec -i -- cat`, make sure + // we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise + // the executed command will remain running. 
+ defer once.Do(func() { p.remoteStdin.Close() }) + + if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil { + runtime.HandleError(err) + } + }() + + // read from remoteStdin until the stream is closed. this is essential to + // be able to exit interactive sessions cleanly and not leak goroutines or + // hang the client's terminal. + // + // TODO we aren't using go-dockerclient any more; revisit this to determine if it's still + // required by engine-api. + // + // go-dockerclient's current hijack implementation + // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564) + // waits for all three streams (stdin/stdout/stderr) to finish copying + // before returning. When hijack finishes copying stdout/stderr, it calls + // Close() on its side of remoteStdin, which allows this copy to complete. + // When that happens, we must Close() on our side of remoteStdin, to + // allow the copy in hijack to complete, and hijack to return. + go func() { + defer runtime.HandleCrash() + defer once.Do(func() { p.remoteStdin.Close() }) + + // this "copy" doesn't actually read anything - it's just here to wait for + // the server to close remoteStdin. + if _, err := io.Copy(io.Discard, p.remoteStdin); err != nil { + runtime.HandleError(err) + } + }() + } +} + +func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) { + if p.Stdout == nil { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + // make sure, packet in queue can be consumed. + // block in queue may lead to deadlock in conn.server + // issue: https://github.com/kubernetes/kubernetes/issues/96339 + defer io.Copy(io.Discard, p.remoteStdout) + + if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) { + if p.Stderr == nil || p.Tty { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + defer io.Copy(io.Discard, p.remoteStderr) + + if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{}) + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV2 interprets the error channel data as plain text. +type errorDecoderV2 struct{} + +func (d *errorDecoderV2) decode(message []byte) error { + return fmt.Errorf("error executing remote command: %s", message) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go new file mode 100644 index 000000000..846dd24a5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v3.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "io" + "net/http" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// streamProtocolV3 implements version 3 of the streaming protocol for attach +// and exec. This version adds support for resizing the container's terminal. +type streamProtocolV3 struct { + *streamProtocolV2 + + resizeStream io.Writer +} + +var _ streamProtocolHandler = &streamProtocolV3{} + +func newStreamProtocolV3(options StreamOptions) streamProtocolHandler { + return &streamProtocolV3{ + streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2), + } +} + +func (p *streamProtocolV3) createStreams(conn streamCreator) error { + // set up the streams from v2 + if err := p.streamProtocolV2.createStreams(conn); err != nil { + return err + } + + // set up resize stream + if p.Tty { + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeResize) + var err error + p.resizeStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + return nil +} + +func (p *streamProtocolV3) handleResizes() { + if p.resizeStream == nil || p.TerminalSizeQueue == nil { + return + } + go func() { + defer runtime.HandleCrash() + + encoder := json.NewEncoder(p.resizeStream) + for { + size := p.TerminalSizeQueue.Next() + if size == nil { + return + } + if err := encoder.Encode(&size); err != nil { + runtime.HandleError(err) + } + } + }() +} + +func (p *streamProtocolV3) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +type errorDecoderV3 struct { + errorDecoderV2 +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go new file mode 100644 index 000000000..69ca934a0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v4.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
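The resize stream handled in v3+ above is driven by the exported `TerminalSizeQueue` from resize.go. A minimal channel-backed implementation, purely illustrative (the type and helper names are not part of client-go), that could be set as `StreamOptions.TerminalSizeQueue` alongside `Tty: true`; returning nil from Next stops resize monitoring.

```go
package main

import "k8s.io/client-go/tools/remotecommand"

// chanSizeQueue is an illustrative TerminalSizeQueue backed by a channel.
type chanSizeQueue struct {
	sizes chan remotecommand.TerminalSize
}

func newChanSizeQueue() *chanSizeQueue {
	return &chanSizeQueue{sizes: make(chan remotecommand.TerminalSize, 1)}
}

// Push records a resize event, e.g. from a SIGWINCH handler.
func (q *chanSizeQueue) Push(width, height uint16) {
	q.sizes <- remotecommand.TerminalSize{Width: width, Height: height}
}

// Next blocks until a resize event arrives; it returns nil once the channel
// is closed, which tells the protocol to stop watching for resizes.
func (q *chanSizeQueue) Next() *remotecommand.TerminalSize {
	size, ok := <-q.sizes
	if !ok {
		return nil
	}
	return &size
}

func main() {
	q := newChanSizeQueue()
	q.Push(120, 40)
	_ = q // would be passed via remotecommand.StreamOptions{Tty: true, TerminalSizeQueue: q}
}
```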
+*/ + +package remotecommand + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/util/exec" +) + +// streamProtocolV4 implements version 4 of the streaming protocol for attach +// and exec. This version adds support for exit codes on the error stream through +// the use of metav1.Status instead of plain text messages. +type streamProtocolV4 struct { + *streamProtocolV3 +} + +var _ streamProtocolHandler = &streamProtocolV4{} + +func newStreamProtocolV4(options StreamOptions) streamProtocolHandler { + return &streamProtocolV4{ + streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3), + } +} + +func (p *streamProtocolV4) createStreams(conn streamCreator) error { + return p.streamProtocolV3.createStreams(conn) +} + +func (p *streamProtocolV4) handleResizes() { + p.streamProtocolV3.handleResizes() +} + +func (p *streamProtocolV4) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel +// and creates an exec.ExitError from it. +type errorDecoderV4 struct{} + +func (d *errorDecoderV4) decode(message []byte) error { + status := metav1.Status{} + err := json.Unmarshal(message, &status) + if err != nil { + return fmt.Errorf("error stream protocol error: %v in %q", err, string(message)) + } + switch status.Status { + case metav1.StatusSuccess: + return nil + case metav1.StatusFailure: + if status.Reason == remotecommand.NonZeroExitCodeReason { + if status.Details == nil { + return errors.New("error stream protocol error: details must be set") + } + for i := range status.Details.Causes { + c := &status.Details.Causes[i] + if c.Type != remotecommand.ExitCodeCauseType { + continue + } + + rc, err := strconv.ParseUint(c.Message, 10, 8) + if err != nil { + return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message) + } + return exec.CodeExitError{ + Err: fmt.Errorf("command terminated with exit code %d", rc), + Code: int(rc), + } + } + + return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType) + } + default: + return errors.New("error stream protocol error: unknown error") + } + + return fmt.Errorf(status.Message) +} diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 5fe768ed5..7c7f1b330 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -17,6 +17,7 @@ limitations under the License. package transport import ( + "context" "fmt" "net" "net/http" @@ -26,6 +27,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/metrics" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. The @@ -36,6 +38,11 @@ type tlsTransportCache struct { transports map[tlsCacheKey]*http.Transport } +// DialerStopCh is stop channel that is passed down to dynamic cert dialer. 
+// It's exposed as variable for testing purposes to avoid testing for goroutine +// leakages. +var DialerStopCh = wait.NeverStop + const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} @@ -50,6 +57,9 @@ type tlsCacheKey struct { serverName string nextProtos string disableCompression bool + // these functions are wrapped to allow them to be used as map keys + getCert *GetCertHolder + dial *DialHolder } func (t tlsCacheKey) String() string { @@ -57,7 +67,8 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t, getCert:%p, dial:%p", + t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression, t.getCert, t.dial) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -70,11 +81,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // Ensure we only create a single transport for the given TLS options c.mu.Lock() defer c.mu.Unlock() + defer metrics.TransportCacheEntries.Observe(len(c.transports)) // See if we already have a custom transport for this config if t, ok := c.transports[key]; ok { + metrics.TransportCreateCalls.Increment("hit") return t, nil } + metrics.TransportCreateCalls.Increment("miss") + } else { + metrics.TransportCreateCalls.Increment("uncacheable") } // Get the TLS options for this client config @@ -83,12 +99,14 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { return nil, err } // The options didn't require a custom TLS config - if tlsConfig == nil && config.Dial == nil && config.Proxy == nil { + if tlsConfig == nil && config.DialHolder == nil && config.Proxy == nil { return http.DefaultTransport, nil } - dial := config.Dial - if dial == nil { + var dial func(ctx context.Context, network, address string) (net.Conn, error) + if config.DialHolder != nil { + dial = config.DialHolder.Dial + } else { dial = (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, @@ -97,11 +115,11 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // If we use are reloading files, we need to handle certificate rotation properly // TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true - if config.TLS.ReloadTLSFiles { + if config.TLS.ReloadTLSFiles && tlsConfig != nil && tlsConfig.GetClientCertificate != nil { dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial) tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate dial = dynamicCertDialer.connDialer.DialContext - go dynamicCertDialer.Run(wait.NeverStop) + go dynamicCertDialer.Run(DialerStopCh) } proxy := http.ProxyFromEnvironment @@ -133,7 +151,7 @@ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) { return tlsCacheKey{}, false, err } - if c.TLS.GetCert != nil || c.Dial != nil || c.Proxy != nil { + if c.Proxy != nil { // cannot determine equality for functions return tlsCacheKey{}, false, nil } @@ -144,6 +162,8 @@ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) { serverName: c.TLS.ServerName, nextProtos: strings.Join(c.TLS.NextProtos, ","), disableCompression: c.DisableCompression, + getCert: c.TLS.GetCertHolder, + dial: c.DialHolder, 
} if c.TLS.ReloadTLSFiles { diff --git a/vendor/k8s.io/client-go/transport/cache_go118.go b/vendor/k8s.io/client-go/transport/cache_go118.go new file mode 100644 index 000000000..d21d5137d --- /dev/null +++ b/vendor/k8s.io/client-go/transport/cache_go118.go @@ -0,0 +1,24 @@ +//go:build go1.18 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +// assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime. +var _ = isComparable[tlsCacheKey] + +func isComparable[T comparable]() {} diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 89de798f6..d8a3d64b3 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -67,8 +67,9 @@ type Config struct { // instead of setting this value directly. WrapTransport WrapperFunc - // Dial specifies the dial function for creating unencrypted TCP connections. - Dial func(ctx context.Context, network, address string) (net.Conn, error) + // DialHolder specifies the dial function for creating unencrypted TCP connections. + // This struct indirection is used to make transport configs cacheable. + DialHolder *DialHolder // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy @@ -78,6 +79,11 @@ type Config struct { Proxy func(*http.Request) (*url.URL, error) } +// DialHolder is used to make the wrapped function comparable so that it can be used as a map key. +type DialHolder struct { + Dial func(ctx context.Context, network, address string) (net.Conn, error) +} + // ImpersonationConfig has all the available impersonation options type ImpersonationConfig struct { // UserName matches user.Info.GetName() @@ -112,7 +118,7 @@ func (c *Config) HasCertAuth() bool { // HasCertCallback returns whether the configuration has certificate callback or not. func (c *Config) HasCertCallback() bool { - return c.TLS.GetCert != nil + return c.TLS.GetCertHolder != nil } // Wrap adds a transport middleware function that will give the caller @@ -143,5 +149,12 @@ type TLSConfig struct { // To use only http/1.1, set to ["http/1.1"]. NextProtos []string - GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. + // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. + // This struct indirection is used to make transport configs cacheable. + GetCertHolder *GetCertHolder +} + +// GetCertHolder is used to make the wrapped function comparable so that it can be used as a map key. 
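The holder indirection introduced above (DialHolder, GetCertHolder) exists because Go func values are not comparable and therefore cannot participate in the `tlsCacheKey` map key, whereas a pointer to a wrapping struct is. A hedged sketch of building a transport config this way; sharing the same holder pointer across configs is what allows the cache to return the same `*http.Transport` (a hit) instead of treating the config as uncacheable, as the old raw `Dial` field did.

```go
package main

import (
	"context"
	"net"
	"time"

	"k8s.io/client-go/transport"
)

func main() {
	dialer := &net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}

	// Share this pointer between configs that should reuse the same cached
	// transport; distinct pointers mean distinct cache entries. The same idea
	// applies to TLSConfig.GetCertHolder.
	dialHolder := &transport.DialHolder{
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			return dialer.DialContext(ctx, network, address)
		},
	}

	cfg := &transport.Config{DialHolder: dialHolder}
	rt, err := transport.New(cfg)
	if err != nil {
		panic(err)
	}
	_ = rt
}
```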
+type GetCertHolder struct { + GetCert func() (*tls.Certificate, error) } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 26a89f93b..e2d1dcc9a 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -491,7 +491,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e DNSDone: func(info httptrace.DNSDoneInfo) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.DNSLookup = time.Now().Sub(dnsStart) + reqInfo.DNSLookup = time.Since(dnsStart) klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs) }, // Dial @@ -503,7 +503,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e ConnectDone: func(network, addr string, err error) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.Dialing = time.Now().Sub(dialStart) + reqInfo.Dialing = time.Since(dialStart) if err != nil { klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err) } else { @@ -517,7 +517,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.TLSHandshake = time.Now().Sub(tlsStart) + reqInfo.TLSHandshake = time.Since(tlsStart) }, // Connection (it can be DNS + Dial or just the time to get one from the connection pool) GetConn: func(hostPort string) { @@ -526,7 +526,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e GotConn: func(info httptrace.GotConnInfo) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.GetConnection = time.Now().Sub(getConn) + reqInfo.GetConnection = time.Since(getConn) reqInfo.ConnectionReused = info.Reused }, // Server Processing (time since we wrote the request until first byte is received) @@ -538,7 +538,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e GotFirstResponseByte: func() { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.ServerProcessing = time.Now().Sub(serverStart) + reqInfo.ServerProcessing = time.Since(serverStart) }, } req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go new file mode 100644 index 000000000..f50b68e5f --- /dev/null +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + restclient "k8s.io/client-go/rest" +) + +// Upgrader validates a response from the server after a SPDY upgrade. +type Upgrader interface { + // NewConnection validates the response and creates a new Connection. 
+ NewConnection(resp *http.Response) (httpstream.Connection, error) +} + +// RoundTripperFor returns a round tripper and upgrader to use with SPDY. +func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) { + tlsConfig, err := restclient.TLSConfigFor(config) + if err != nil { + return nil, nil, err + } + proxy := http.ProxyFromEnvironment + if config.Proxy != nil { + proxy = config.Proxy + } + upgradeRoundTripper := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, + }) + wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// dialer implements the httpstream.Dialer interface. +type dialer struct { + client *http.Client + upgrader Upgrader + method string + url *url.URL +} + +var _ httpstream.Dialer = &dialer{} + +// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY. +func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer { + return &dialer{ + client: client, + upgrader: upgrader, + method: method, + url: url, + } +} + +func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) { + req, err := http.NewRequest(d.method, d.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("error creating request: %v", err) + } + return Negotiate(d.upgrader, d.client, req, protocols...) +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a SPDY connection. Upon success, it returns the connection and the protocol selected by +// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor. 
+func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) { + for i := range protocols { + req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i]) + } + resp, err := client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + conn, err := upgrader.NewConnection(resp) + if err != nil { + return nil, "", err + } + return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil +} diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 68a0a704f..8e312800f 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -18,8 +18,8 @@ package transport import ( "fmt" - "io/ioutil" "net/http" + "os" "strings" "sync" "time" @@ -132,7 +132,7 @@ type fileTokenSource struct { var _ = oauth2.TokenSource(&fileTokenSource{}) func (ts *fileTokenSource) Token() (*oauth2.Token, error) { - tokb, err := ioutil.ReadFile(ts.path) + tokb, err := os.ReadFile(ts.path) if err != nil { return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err) } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index b4a7bfa67..78060719a 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -22,8 +22,8 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "net/http" + "os" "sync" "time" @@ -39,6 +39,10 @@ func New(config *Config) (http.RoundTripper, error) { return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") } + if !isValidHolders(config) { + return nil, fmt.Errorf("misconfigured holder for dialer or cert callback") + } + var ( rt http.RoundTripper err error @@ -56,6 +60,18 @@ func New(config *Config) (http.RoundTripper, error) { return HTTPWrappersForConfig(config, rt) } +func isValidHolders(config *Config) bool { + if config.TLS.GetCertHolder != nil && config.TLS.GetCertHolder.GetCert == nil { + return false + } + + if config.DialHolder != nil && config.DialHolder.Dial == nil { + return false + } + + return true +} + // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
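On the consumer side of transport/spdy above: `RoundTripperFor` plus `NewDialer` yield an `httpstream.Dialer` that negotiates a protocol with the API server. A sketch against a pod port-forward URL; the URL path construction and function name are assumptions, and the protocol string matches the usual port-forward v1 protocol name.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
)

func dialPortForward(cfg *restclient.Config, ns, pod string) (httpstream.Connection, string, error) {
	rt, upgrader, err := spdy.RoundTripperFor(cfg)
	if err != nil {
		return nil, "", err
	}

	// cfg.Host is assumed to be a full "https://host" URL.
	u, err := url.Parse(fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s/portforward", cfg.Host, ns, pod))
	if err != nil {
		return nil, "", err
	}

	// NewDialer wires the upgrade round tripper into a plain *http.Client and
	// negotiates one of the offered protocols with the server.
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: rt}, "POST", u)
	return dialer.Dial("portforward.k8s.io")
}
```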
func TLSConfigFor(c *Config) (*tls.Config, error) { @@ -116,7 +132,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { return dynamicCertLoader() } if c.HasCertCallback() { - cert, err := c.TLS.GetCert() + cert, err := c.TLS.GetCertHolder.GetCert() if err != nil { return nil, err } @@ -157,10 +173,7 @@ func loadTLSFiles(c *Config) error { } c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) - if err != nil { - return err - } - return nil + return err } // dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, @@ -170,7 +183,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 75143ec07..91e171271 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -25,9 +25,10 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" + "math" "math/big" "net" + "os" "path/filepath" "strings" "time" @@ -44,6 +45,7 @@ type Config struct { Organization []string AltNames AltNames Usages []x509.ExtKeyUsage + NotBefore time.Time } // AltNames contains the domain names and IP addresses that will be added @@ -57,14 +59,24 @@ type AltNames struct { // NewSelfSignedCACert creates a CA certificate func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) + notBefore := now.UTC() + if !cfg.NotBefore.IsZero() { + notBefore = cfg.NotBefore.UTC() + } tmpl := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), + SerialNumber: serial, Subject: pkix.Name{ CommonName: cfg.CommonName, Organization: cfg.Organization, }, DNSNames: []string{cfg.CommonName}, - NotBefore: now.UTC(), + NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, @@ -101,9 +113,9 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt") keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key") if len(fixtureDirectory) > 0 { - cert, err := ioutil.ReadFile(certFixturePath) + cert, err := os.ReadFile(certFixturePath) if err == nil { - key, err := ioutil.ReadFile(keyFixturePath) + key, err := os.ReadFile(keyFixturePath) if err == nil { return cert, key, nil } @@ -116,9 +128,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) caTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), }, @@ -144,9 +161,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) template := x509.Certificate{ - SerialNumber: big.NewInt(2), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), }, @@ -188,10 +210,10 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a } if len(fixtureDirectory) > 0 { - if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } - if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index 35fde68a4..a70e51327 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -19,7 +19,6 @@ package cert import ( "crypto/x509" "fmt" - "io/ioutil" "os" "path/filepath" ) @@ -66,13 +65,13 @@ func WriteCert(certPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { return err } - return ioutil.WriteFile(certPath, data, os.FileMode(0644)) + return os.WriteFile(certPath, data, os.FileMode(0644)) } // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { - pemBlock, err := ioutil.ReadFile(filename) + pemBlock, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -101,7 +100,7 @@ func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) { // CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func CertsFromFile(file string) ([]*x509.Certificate, error) { - pemBlock, err := ioutil.ReadFile(file) + pemBlock, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go new file mode 100644 index 000000000..d170badb6 --- /dev/null +++ b/vendor/k8s.io/client-go/util/exec/exec.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
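The cert helper changes above add random serial numbers and an explicit `NotBefore` override to `cert.Config`. A small sketch of the CA path under those changes; the RSA key generation and names are illustrative, not part of the helper.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"k8s.io/client-go/util/cert"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// NotBefore can now be backdated explicitly (e.g. to tolerate clock skew);
	// when left zero, the helper keeps using time.Now(). The serial is a
	// random positive value rather than the previous fixed one.
	caCert, err := cert.NewSelfSignedCACert(cert.Config{
		CommonName:   "example-ca",
		Organization: []string{"example"},
		NotBefore:    time.Now().Add(-time.Hour),
	}, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(caCert.SerialNumber, caCert.NotBefore)
}
```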
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). +type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +func (e CodeExitError) Exited() bool { + return true +} + +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go index 83c2c6254..ecd3e4710 100644 --- a/vendor/k8s.io/client-go/util/keyutil/key.go +++ b/vendor/k8s.io/client-go/util/keyutil/key.go @@ -26,7 +26,6 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "os" "path/filepath" ) @@ -69,13 +68,13 @@ func WriteKey(keyPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { return err } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) + return os.WriteFile(keyPath, data, os.FileMode(0600)) } // LoadOrGenerateKeyFile looks for a key in the file at the given path. If it // can't find one, it will generate a new key and store it there. func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) + loadedData, err := os.ReadFile(keyPath) // Call verifyKeyData to ensure the file wasn't empty/corrupt. if err == nil && verifyKeyData(loadedData) { return loadedData, false, err @@ -122,7 +121,7 @@ func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { // PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. // Returns an error if the file could not be read or if the private key could not be parsed. func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } @@ -136,7 +135,7 @@ func PrivateKeyFromFile(file string) (interface{}, error) { // PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. // Reads public keys from both public and private key files. 
func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 26eacc2ba..c1df72030 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -33,38 +33,81 @@ type DelayingInterface interface { AddAfter(item interface{}, duration time.Duration) } +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type DelayingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue Interface +} + // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedDelayingQueue instead. +// NewDelayingQueueWithConfig instead and specify a name. func NewDelayingQueue() DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewWithConfig(QueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, q, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) } -// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewNamedDelayingQueue(name string) DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue -// with ability to inject real or fake clock for testing purposes +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. 
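To illustrate the DelayingQueueConfig introduced above (a sketch, not part of the patch; the fake-clock import path is an assumption based on k8s.io/utils):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())

	// Name enables metrics registration; Clock is usually only injected in tests.
	q := workqueue.NewDelayingQueueWithConfig(workqueue.DelayingQueueConfig{
		Name:  "example",
		Clock: fakeClock,
	})
	defer q.ShutDown()

	q.AddAfter("item", 30*time.Second)
	fakeClock.Step(time.Minute) // the delayed item becomes ready

	item, shutdown := q.Get() // blocks until the item is moved onto the queue
	if !shutdown {
		fmt.Println("got", item)
		q.Done(item)
	}
}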
func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { - return newDelayingQueue(clock, NewNamed(name), name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { ret := &delayingType{ Interface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index a5c976e0f..8555aa95f 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -16,11 +16,11 @@ limitations under the License. // Package workqueue provides a simple queue that supports the following // features: -// * Fair: items processed in the order in which they are added. -// * Stingy: a single item will not be processed multiple times concurrently, -// and if an item is added multiple times before it can be processed, it -// will only be processed once. -// * Multiple consumers and producers. In particular, it is allowed for an -// item to be reenqueued while it is being processed. -// * Shutdown notifications. +// - Fair: items processed in the order in which they are added. +// - Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// - Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// - Shutdown notifications. package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 4b0a69616..f012ccc55 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -244,13 +244,18 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu } } -func newRetryMetrics(name string) retryMetrics { +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + retries: provider.NewRetriesMetric(name), } } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 6f7063269..380c06455 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -33,17 +33,60 @@ type Interface interface { ShuttingDown() bool } +// QueueConfig specifies optional configurations to customize an Interface. +type QueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. 
+ Clock clock.WithTicker +} + // New constructs a new work queue (see the package comment). func New() *Type { - return NewNamed("") + return NewWithConfig(QueueConfig{ + Name: "", + }) } +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewWithConfig(config QueueConfig) *Type { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. func NewNamed(name string) *Type { - rc := clock.RealClock{} + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + return newQueue( - rc, - globalMetricsFactory.newQueueMetrics(name, rc), - defaultUnfinishedWorkUpdatePeriod, + config.Clock, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, ) } diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 267f4ff40..3e4016fb0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -16,6 +16,8 @@ limitations under the License. package workqueue +import "k8s.io/utils/clock" + // RateLimitingInterface is an interface that rate limits items being added to the queue. type RateLimitingInterface interface { DelayingInterface @@ -32,22 +34,68 @@ type RateLimitingInterface interface { NumRequeues(item interface{}) int } +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. + +type RateLimitingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue DelayingInterface +} + // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedRateLimitingQueue instead. +// NewRateLimitingQueueWithConfig instead and specify a name. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. 
+func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), + DelayingInterface: config.DelayingQueue, rateLimiter: rateLimiter, } } +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) +} + +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. +func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 8cccebf2e..a2fe8f351 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,5 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: + - harshanarayana - pohly approvers: - dims diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 7de2212cc..d45cbe172 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -28,7 +28,6 @@ Historical context is available here: Semantic versioning is used in this repository. It contains several Go modules with different levels of stability: - `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags -- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags - `examples` - no stable API, no tags, no intention to ever stabilize Exempt from the API stability guarantee are items (packages, functions, etc.) diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 0bf19280e..005513f2a 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -34,18 +34,6 @@ import ( // mutex locking. var ( - // contextualLoggingEnabled controls whether contextual logging is - // active. Disabling it may have some small performance benefit. - contextualLoggingEnabled = true - - // globalLogger is the global Logger chosen by users of klog, nil if - // none is available. - globalLogger *Logger - - // globalLoggerOptions contains the options that were supplied for - // globalLogger. - globalLoggerOptions loggerOptions - // klogLogger is used as fallback for logging through the normal klog code // when no Logger is set. klogLogger logr.Logger = logr.New(&klogger{}) @@ -59,8 +47,9 @@ var ( // If set, all log lines will be suppressed from the regular output, and // redirected to the logr implementation. // Use as: -// ... 
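Similarly, a hedged sketch of the new RateLimitingQueueWithConfig constructor combined with the package's existing DefaultControllerRateLimiter; the queue name is made up for the example:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewRateLimitingQueueWithConfig(
		workqueue.DefaultControllerRateLimiter(),
		workqueue.RateLimitingQueueConfig{Name: "example-controller"},
	)
	defer q.ShutDown()

	q.AddRateLimited("key")

	item, shutdown := q.Get()
	if shutdown {
		return
	}
	// Forget clears the per-item backoff once processing succeeded.
	q.Forget(item)
	q.Done(item)
	fmt.Println("processed", item)
}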
-// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) // // To remove a backing logr implemention, use ClearLogger. Setting an // empty logger with SetLogger(logr.Logger{}) does not work. @@ -81,10 +70,13 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - globalLogger = &logger - globalLoggerOptions = loggerOptions{} + logging.loggerOptions = loggerOptions{} for _, opt := range opts { - opt(&globalLoggerOptions) + opt(&logging.loggerOptions) + } + logging.logger = &logWriter{ + Logger: logger, + writeKlogBuffer: logging.loggerOptions.writeKlogBuffer, } } @@ -104,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption { } } +// WriteKlogBuffer sets a callback that will be invoked by klog to write output +// produced by non-structured log calls like Infof. +// +// The buffer will contain exactly the same data that klog normally would write +// into its own output stream(s). In particular this includes the header, if +// klog is configured to write one. The callback then can divert that data into +// its own output streams. The buffer may or may not end in a line break. +// +// Without such a callback, klog will call the logger's Info or Error method +// with just the message string (i.e. no header). +func WriteKlogBuffer(write func([]byte)) LoggerOption { + return func(o *loggerOptions) { + o.writeKlogBuffer = write + } +} + // LoggerOption implements the functional parameter paradigm for // SetLoggerWithOptions. type LoggerOption func(o *loggerOptions) @@ -111,6 +119,13 @@ type LoggerOption func(o *loggerOptions) type loggerOptions struct { contextualLogger bool flush func() + writeKlogBuffer func([]byte) +} + +// logWriter combines a logger (always set) with a write callback (optional). +type logWriter struct { + Logger + writeKlogBuffer func([]byte) } // ClearLogger removes a backing Logger implementation if one was set earlier @@ -119,8 +134,8 @@ type loggerOptions struct { // Modifying the logger is not thread-safe and should be done while no other // goroutines invoke log calls, usually during program initialization. func ClearLogger() { - globalLogger = nil - globalLoggerOptions = loggerOptions{} + logging.logger = nil + logging.loggerOptions = loggerOptions{} } // EnableContextualLogging controls whether contextual logging is enabled. @@ -132,14 +147,14 @@ func ClearLogger() { // // This must be called during initialization before goroutines are started. func EnableContextualLogging(enabled bool) { - contextualLoggingEnabled = enabled + logging.contextualLoggingEnabled = enabled } // FromContext retrieves a logger set by the caller or, if not set, // falls back to the program's global logger (a Logger instance or klog // itself). func FromContext(ctx context.Context) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { if logger, err := logr.FromContext(ctx); err == nil { return logger } @@ -160,10 +175,10 @@ func TODO() Logger { // better receive a logger via its parameters. TODO can be used as a temporary // solution for such code. func Background() Logger { - if globalLoggerOptions.contextualLogger { - // Is non-nil because globalLoggerOptions.contextualLogger is + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. 
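A sketch of the WriteKlogBuffer option documented above (not part of the diff); the funcr-based logr.Logger is only a stand-in backend chosen for the example:

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
	"k8s.io/klog/v2"
)

func main() {
	// A trivial logr backend that prints structured entries to stdout.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// With WriteKlogBuffer set, legacy calls such as klog.Infof keep their
	// klog-formatted header and are handed to the callback as raw bytes;
	// structured calls still go through the logr backend.
	klog.SetLoggerWithOptions(logger, klog.WriteKlogBuffer(func(data []byte) {
		fmt.Print(string(data))
	}))
	defer klog.ClearLogger()

	klog.Infof("legacy call routed through the buffer callback")
	klog.InfoS("structured call handled by the logr backend", "key", "value")
}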
- return *globalLogger + return logging.logger.Logger } return klogLogger @@ -172,7 +187,7 @@ func Background() Logger { // LoggerWithValues returns logger.WithValues(...kv) when // contextual logging is enabled, otherwise the logger. func LoggerWithValues(logger Logger, kv ...interface{}) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithValues(kv...) } return logger @@ -181,7 +196,7 @@ func LoggerWithValues(logger Logger, kv ...interface{}) Logger { // LoggerWithName returns logger.WithName(name) when contextual logging is // enabled, otherwise the logger. func LoggerWithName(logger Logger, name string) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithName(name) } return logger @@ -190,7 +205,7 @@ func LoggerWithName(logger Logger, name string) Logger { // NewContext returns logr.NewContext(ctx, logger) when // contextual logging is enabled, otherwise ctx. func NewContext(ctx context.Context, logger Logger) context.Context { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logr.NewContext(ctx, logger) } return ctx diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 000000000..63995ca6d --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). 
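To show what the new klog.Format helper is for (illustrative only; the config type is invented for the example):

package main

import (
	"k8s.io/klog/v2"
)

// exampleConfig stands in for a value whose default formatting would be hard
// to read in log output.
type exampleConfig struct {
	Replicas int      `json:"replicas"`
	Images   []string `json:"images"`
}

func main() {
	cfg := exampleConfig{Replicas: 3, Images: []string{"nginx:1.25"}}

	// A text backend logs indented JSON via the Stringer; a structured
	// backend receives the plain value again through MarshalLog.
	klog.InfoS("applying configuration", "config", klog.Format(cfg))
	klog.Flush()
}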
+ ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index ac88682a2..f325ded5e 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -40,44 +40,33 @@ type Buffer struct { next *Buffer } -// Buffers manages the reuse of individual buffer instances. It is thread-safe. -type Buffers struct { - // mu protects the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - mu sync.Mutex - - // freeList is a list of byte buffers, maintained under mu. - freeList *Buffer +var buffers = sync.Pool{ + New: func() interface{} { + return new(Buffer) + }, } // GetBuffer returns a new, ready-to-use buffer. -func (bl *Buffers) GetBuffer() *Buffer { - bl.mu.Lock() - b := bl.freeList - if b != nil { - bl.freeList = b.next - } - bl.mu.Unlock() - if b == nil { - b = new(Buffer) - } else { - b.next = nil - b.Reset() - } +func GetBuffer() *Buffer { + b := buffers.Get().(*Buffer) + b.Reset() return b } // PutBuffer returns a buffer to the free list. -func (bl *Buffers) PutBuffer(b *Buffer) { +func PutBuffer(b *Buffer) { if b.Len() >= 256 { - // Let big buffers die a natural death. + // Let big buffers die a natural death, without relying on + // sync.Pool behavior. The documentation implies that items may + // get deallocated while stored there ("If the Pool holds the + // only reference when this [= be removed automatically] + // happens, the item might be deallocated."), but + // https://github.com/golang/go/issues/23199 leans more towards + // having such a size limit. return } - bl.mu.Lock() - b.next = bl.freeList - bl.freeList = b - bl.mu.Unlock() + + buffers.Put(b) } // Some custom tiny helper functions to print the log header efficiently. @@ -121,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int { return copy(buf.Tmp[i:], buf.Tmp[j:]) } -// FormatHeader formats a log header using the provided file name and line number. +// FormatHeader formats a log header using the provided file name and line number +// and writes it into the buffer. func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { if line < 0 { line = 0 // not a real line number, but acceptable to someDigits @@ -157,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now buf.Tmp[n+2] = ' ' buf.Write(buf.Tmp[:n+3]) } + +// SprintHeader formats a log header and returns a string. This is a simpler +// version of FormatHeader for use in ktesting. +func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' 
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ']' + return string(buf.Tmp[:22]) +} diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 000000000..f27bd1447 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d89731368..bcdf5f8ee 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,10 +18,17 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" + + "github.com/go-logr/logr" ) +type textWriter interface { + WriteText(*bytes.Buffer) +} + // WithValues implements LogSink.WithValues. The old key/value pairs are // assumed to be well-formed, the new ones are checked and padded if // necessary. It returns a new slice. @@ -44,60 +51,111 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// TrimDuplicates deduplicates elements provided in multiple key/value tuple -// slices, whilst maintaining the distinction between where the items are -// contained. -func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { - // maintain a map of all seen keys - seenKeys := map[interface{}]struct{}{} - // build the same number of output slices as inputs - outs := make([][]interface{}, len(kvLists)) - // iterate over the input slices backwards, as 'later' kv specifications - // of the same key will take precedence over earlier ones - for i := len(kvLists) - 1; i >= 0; i-- { - // initialise this output slice - outs[i] = []interface{}{} - // obtain a reference to the kvList we are processing - // and make sure it has an even number of entries - kvList := kvLists[i] - if len(kvList)%2 != 0 { - kvList = append(kvList, missingValue) +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. 
The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue } + merged = append(merged, key, first[i+1]) + } + merged = append(merged, second...) + if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged +} + +type Formatter struct { + AnyToStringHook AnyToStringFunc +} + +type AnyToStringFunc func(v interface{}) string - // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for - // slices that have an even number of elements. - // We add (len(kvList) % 2) here to handle the case where there is an - // odd number of elements in a kvList. - // If there is an odd number, then the last element in the slice will - // have the value 'null'. - for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { - k := kvList[i2] - // if we have already seen this key, do not include it again - if _, ok := seenKeys[k]; ok { - continue - } - // make a note that we've observed a new key - seenKeys[k] = struct{}{} - // attempt to obtain the value of the key - var v interface{} - // i2+1 should only ever be out of bounds if we handling the first - // iteration over a slice with an odd number of elements - if i2+1 < len(kvList) { - v = kvList[i2+1] - } - // add this KV tuple to the *start* of the output list to maintain - // the original order as we are iterating over the slice backwards - outs[i] = append([]interface{}{k, v}, outs[i]...) +// MergeKVsInto is a variant of MergeKVs which directly formats the key/value +// pairs into a buffer. +func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + if len(first) == 0 && len(second) == 0 { + // Nothing to do at all. + return + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + for i := 0; i < len(second); i += 2 { + f.KVFormat(b, second[i], second[i+1]) + } + return + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. 
+ overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + for i := 0; i < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue } + f.KVFormat(b, key, first[i+1]) + } + // Round down. + l := len(second) + l = l / 2 * 2 + for i := 1; i < l; i += 2 { + f.KVFormat(b, second[i-1], second[i]) + } + if len(second)%2 == 1 { + f.KVFormat(b, second[len(second)-1], missingValue) } - return outs +} + +func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + Formatter{}.MergeAndFormatKVs(b, first, second) } const missingValue = "(MISSING)" // KVListFormat serializes all key/value pairs into the provided buffer. // A space gets inserted before the first pair and between each pair. -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { +func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { for i := 0; i < len(keysAndValues); i += 2 { var v interface{} k := keysAndValues[i] @@ -106,49 +164,104 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { } else { v = missingValue } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if k, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(k) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } + f.KVFormat(b, k, v) + } +} - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + Formatter{}.KVListFormat(b, keysAndValues...) +} + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). 
+ switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, ErrorToString(v)) - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) + writeStringValue(b, value) default: - writeStringValue(b, false, fmt.Sprintf("%+v", v)) + f.formatAny(b, value) } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +func KVFormat(b *bytes.Buffer, k, v interface{}) { + Formatter{}.KVFormat(b, k, v) +} + +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') + if f.AnyToStringHook != nil { + b.WriteString(f.AnyToStringHook(v)) + return + } + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err)) + return } + // Remove trailing newline. + b.Truncate(b.Len() - 1) } // StringerToString converts a Stringer to a string, @@ -163,6 +276,18 @@ func StringerToString(s fmt.Stringer) (ret string) { return } +// MarshalerToValue invokes a marshaler and catches +// panics. +func MarshalerToValue(m logr.Marshaler) (ret interface{}) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = m.MarshalLog() + return +} + // ErrorToString converts an error to a string, // handling panics if they occur.
func ErrorToString(err error) (ret string) { @@ -175,18 +300,23 @@ func ErrorToString(err error) (ret string) { return } -func writeStringValue(b *bytes.Buffer, quote bool, v string) { +func writeTextWriterValue(b *bytes.Buffer, v textWriter) { + b.WriteByte('=') + defer func() { + if err := recover(); err != nil { + fmt.Fprintf(b, `""`, err) + } + }() + v.WriteText(b) +} + +func writeStringValue(b *bytes.Buffer, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') if index == -1 { b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) return } diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index db58f8baa..786af74bf 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -17,8 +17,10 @@ limitations under the License. package klog import ( + "bytes" "fmt" "reflect" + "strings" "github.com/go-logr/logr" ) @@ -31,11 +33,30 @@ type ObjectRef struct { func (ref ObjectRef) String() string { if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + var builder strings.Builder + builder.Grow(len(ref.Namespace) + len(ref.Name) + 1) + builder.WriteString(ref.Namespace) + builder.WriteRune('/') + builder.WriteString(ref.Name) + return builder.String() } return ref.Name } +func (ref ObjectRef) WriteText(out *bytes.Buffer) { + out.WriteRune('"') + ref.writeUnquoted(out) + out.WriteRune('"') +} + +func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) { + if ref.Namespace != "" { + out.WriteString(ref.Namespace) + out.WriteRune('/') + } + out.WriteString(ref.Name) +} + // MarshalLog ensures that loggers with support for structured output will log // as a struct by removing the String method via a custom type. func (ref ObjectRef) MarshalLog() interface{} { @@ -77,6 +98,8 @@ func KRef(namespace, name string) ObjectRef { } // KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. func KObjs(arg interface{}) []ObjectRef { s := reflect.ValueOf(arg) if s.Kind() != reflect.Slice { @@ -92,3 +115,98 @@ func KObjs(arg interface{}) []ObjectRef { } return objectRefs } + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed. 
+func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, errStr := ks.process() + if errStr != "" { + return errStr + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, errStr := ks.process() + if errStr != "" { + return errStr + } + return objectRefs +} + +func (ks kobjSlice) process() (objs []interface{}, err string) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, "" + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Sprintf("", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Sprintf("", item) + } + } + return objectRefs, "" +} + +var nilToken = []byte("null") + +func (ks kobjSlice) WriteText(out *bytes.Buffer) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as null. + out.Write(nilToken) + return + case reflect.Slice: + // Okay, handle below. + default: + fmt.Fprintf(out, `""`, ks.arg) + return + } + out.Write([]byte{'['}) + defer out.Write([]byte{']'}) + for i := 0; i < s.Len(); i++ { + if i > 0 { + out.Write([]byte{','}) + } + item := s.Index(i).Interface() + if item == nil { + out.Write(nilToken) + } else if v, ok := item.(KMetadata); ok { + KObj(v).WriteText(out) + } else { + fmt.Fprintf(out, `""`, item) + return + } + } +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index cb04590fe..152f8a6bd 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -39,35 +39,38 @@ // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. +// -logtostderr=true +// Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. // -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. 
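A short sketch of KObjSlice in use (not part of the diff); the corev1/metav1 imports are assumptions, and any type implementing KMetadata would do:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pods := []*corev1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "pod-a"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "pod-b"}},
	}

	// Unlike KObjs, the slice is only inspected when the entry is actually
	// emitted, and the output adapts to text versus structured backends.
	klog.InfoS("syncing pods", "pods", klog.KObjSlice(pods))
	klog.Flush()
}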
-// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". +// Other flags provide aids to debugging. // +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". package klog import ( @@ -88,10 +91,9 @@ import ( "sync/atomic" "time" - "github.com/go-logr/logr" - "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" "k8s.io/klog/v2/internal/serialize" "k8s.io/klog/v2/internal/severity" ) @@ -242,6 +244,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -263,6 +269,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -271,15 +288,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -287,10 +304,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -380,45 +394,48 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults. +var logging loggingT +var commandLine flag.FlagSet + +// init sets up the defaults and creates command line flags. 
func init() { + commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") + commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") + logging.setVState(0, nil, false) + commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") + commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") + commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - logging.oneOutput = false + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + logging.settings.contextualLoggingEnabled = true logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. +// It may get called repeatedly for different flagsets, but not +// twice for the same one. May get called concurrently +// to other goroutines using klog. However, only some flags +// may get set concurrently (see implementation). func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + commandLine.VisitAll(func(f *flag.Flag) { + flagset.Var(f.Value, f.Name, f.Usage) + }) } // Flush flushes all pending log I/O. @@ -426,8 +443,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *logWriter + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -437,26 +466,14 @@ type loggingT struct { // Level flag. Handled atomically. stderrThreshold severityValue // The -stderrthreshold flag. - // bufferCache maintains the free list. It uses its own mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - bufferCache buffer.Buffers + // Access to all of the following fields must be protected via a mutex. - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. file [severity.NumSeverity]flushSyncWriter - // flushD holds a flushDaemon that frequently flushes log file buffers. - flushD *flushDaemon // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. 
It may be read safely // using sync.LoadInt32, but is only modified under mu. @@ -496,7 +513,42 @@ type loggingT struct { filter LogFilter } -var logging loggingT +// deepCopy creates a copy that doesn't share anything with the original +// instance. +func (s settings) deepCopy() settings { + // vmodule is a slice and would be shared, so we have to copy it. + filter := make([]modulePat, len(s.vmodule.filter)) + for i := range s.vmodule.filter { + filter[i] = s.vmodule.filter[i] + } + s.vmodule.filter = filter + + if s.logger != nil { + logger := *s.logger + s.logger = &logger + } + + return s +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + settings + + // flushD holds a flushDaemon that frequently flushes log file buffers. + // Uses its own mutex. + flushD *flushDaemon + + // mu protects the remaining elements of this structure and the fields + // in settings which need a mutex lock. + mu sync.Mutex + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level +} // setVState sets a consistent state for V logging. // l.mu is held. @@ -520,14 +572,66 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool var timeNow = time.Now // Stubbed out for testing. +// CaptureState gathers information about all current klog settings. +// The result can be used to restore those settings. +func CaptureState() State { + logging.mu.Lock() + defer logging.mu.Unlock() + return &state{ + settings: logging.settings.deepCopy(), + flushDRunning: logging.flushD.isRunning(), + maxSize: MaxSize, + } +} + +// State stores a snapshot of klog settings. It gets created with CaptureState +// and can be used to restore the entire state. Modifying individual settings +// is supported via the command line flags. +type State interface { + // Restore restores the entire state. It may get called more than once. + Restore() +} + +type state struct { + settings + + flushDRunning bool + maxSize uint64 +} + +func (s *state) Restore() { + // This needs to be done before mutex locking. + if s.flushDRunning && !logging.flushD.isRunning() { + // This is not quite accurate: StartFlushDaemon might + // have been called with some different interval. + interval := s.flushInterval + if interval == 0 { + interval = flushInterval + } + logging.flushD.run(interval) + } else if !s.flushDRunning && logging.flushD.isRunning() { + logging.flushD.stop() + } + + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.settings = s.settings + logging.setVState(s.verbosity, s.vmodule.filter, true) + MaxSize = s.maxSize +} + /* header formats a log header as defined by the C++ implementation. It returns a buffer containing the formatted header and the user's file and line number. The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... + where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) @@ -558,7 +662,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin // formatHeader formats a log header using the provided file name and line number.
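// A minimal sketch (not part of the vendored file) of how the CaptureState/State API
// above might be used by a test that temporarily changes klog configuration; the
// package and test names, the private FlagSet and the "v" value are hypothetical.

package klogstate_test

import (
	"flag"
	"testing"

	"k8s.io/klog/v2"
)

func TestWithHighVerbosity(t *testing.T) {
	// Snapshot all klog settings (flags, flush daemon state, MaxSize) and
	// restore them when the test finishes, so other tests are unaffected.
	state := klog.CaptureState()
	defer state.Restore()

	// Raise verbosity only for this test via a private FlagSet; the flag
	// values are shared with the global klog state.
	var fs flag.FlagSet
	klog.InitFlags(&fs)
	if err := fs.Set("v", "5"); err != nil {
		t.Fatal(err)
	}

	klog.V(5).Info("visible only while the captured state is overridden")
}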
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { - buf := l.bufferCache.GetBuffer() + buf := buffer.GetBuffer() if l.skipHeaders { return buf } @@ -567,17 +671,18 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf return buf } -func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logger is set, we clear the generated header as we rely on the backing - // logger implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -586,17 +691,18 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -608,17 +714,18 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) { l.printfDepth(s, logger, filter, 1, format, args...) 
} -func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -633,13 +740,14 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -652,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -664,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -679,7 +787,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. 
- b := l.bufferCache.GetBuffer() + b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. @@ -688,9 +796,9 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. - l.bufferCache.PutBuffer(b) + buffer.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -745,7 +853,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -757,17 +865,21 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() - if log != nil { - // TODO: set 'severity' and caller information as structured log info - // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == severity.ErrorLog { - globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) } else { - log.WithCallDepth(depth + 3).Info(string(data)) + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } } } else if l.toStderr { os.Stderr.Write(data) @@ -822,12 +934,15 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. @@ -839,7 +954,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf timeoutFlush(ExitFlushTimeout) OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. 
} - l.bufferCache.PutBuffer(buf) + buffer.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -847,25 +962,6 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1077,9 +1173,9 @@ func (f *flushDaemon) isRunning() bool { return f.stopC != nil } -// StopFlushDaemon stops the flush daemon, if running. +// StopFlushDaemon stops the flush daemon, if running, and flushes once. // This prevents klog from leaking goroutines on shutdown. After stopping -// the daemon, you can still manually flush buffers by calling Flush(). +// the daemon, you can still manually flush buffers again by calling Flush(). func StopFlushDaemon() { logging.flushD.stop() } @@ -1109,8 +1205,8 @@ func (l *loggingT) flushAll() { file.Sync() // ignore error } } - if globalLoggerOptions.flush != nil { - globalLoggerOptions.flush() + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() } } @@ -1132,6 +1228,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity.Severity @@ -1158,7 +1267,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1192,24 +1301,28 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr *logr.Logger + logger *logWriter } func newVerbose(level Level, b bool) Verbose { - if globalLogger == nil { + if logging.logger == nil { return Verbose{b, nil} } - v := globalLogger.V(int(level)) - return Verbose{b, &v} + v := logging.logger.V(int(level)) + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. 
These methods will write to the Info log if called. // Thus, one may write either +// // if klog.V(2).Enabled() { klog.Info("log this") } +// // or +// // klog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1219,6 +1332,13 @@ func newVerbose(level Level, b bool) Verbose { // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). +func VDepth(depth int, level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. @@ -1235,7 +1355,7 @@ func V(level Level) Verbose { // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return newVerbose(level, false) } // runtime.Callers returns "return PCs", but we want @@ -1263,7 +1383,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(severity.InfoLog, v.logr, logging.filter, args...) + logging.print(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1271,7 +1391,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoDepth(depth int, args ...interface{}) { if v.enabled { - logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1279,7 +1399,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(severity.InfoLog, v.logr, logging.filter, args...) + logging.println(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1287,7 +1407,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfolnDepth(depth int, args ...interface{}) { if v.enabled { - logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1295,7 +1415,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) } } @@ -1303,7 +1423,7 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { if v.enabled { - logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) } } @@ -1311,28 +1431,28 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { // See the documentation of V for usage. 
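// A minimal sketch (not part of the vendored file) of how VDepth, added above, might
// be used by a logging wrapper so that -vmodule patterns are matched against the
// wrapper's caller instead of the wrapper's own file; the logutil package and the
// Debugf helper are hypothetical.

package logutil

import "k8s.io/klog/v2"

// Debugf logs at verbosity 4 on behalf of its caller. Passing depth 1 makes
// klog skip this helper's stack frame when evaluating -vmodule.
func Debugf(format string, args ...interface{}) {
	if v := klog.VDepth(1, 4); v.Enabled() {
		v.Infof(format, args...)
	}
}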
func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, args...) + logging.errorS(err, v.logger, logging.filter, 0, msg, args...) } } @@ -1340,44 +1460,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(severity.InfoLog, globalLogger, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(severity.InfoLog, globalLogger, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) } // InfolnDepth acts as Infoln but uses depth to determine which call frame to log. // InfolnDepth(0, "msg") is the same as Infoln("msg"). func InfolnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) } // InfofDepth acts as Infof but uses depth to determine which call frame to log. // InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). 
func InfofDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1389,79 +1509,79 @@ func InfofDepth(depth int, format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(severity.WarningLog, globalLogger, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(severity.WarningLog, globalLogger, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) } // WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. // WarninglnDepth(0, "msg") is the same as Warningln("msg"). func WarninglnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) } // WarningfDepth acts as Warningf but uses depth to determine which call frame to log. // WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). func WarningfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) 
+ logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. // ErrorlnDepth(0, "msg") is the same as Errorln("msg"). func ErrorlnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) } // ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. // ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). func ErrorfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1474,52 +1594,63 @@ func ErrorfDepth(depth int, format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls OsExit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. +// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. 
// FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. // FatallnDepth(0, "msg") is the same as Fatalln("msg"). func FatallnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. // FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). func FatalfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1530,41 +1661,41 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. // ExitlnDepth(0, "msg") is the same as Exitln("msg"). func ExitlnDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) 
} // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // ExitfDepth acts as Exitf but uses depth to determine which call frame to log. // ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). func ExitfDepth(depth int, format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 351d7a740..15de00e21 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -42,24 +42,26 @@ func (l *klogger) Init(info logr.RuntimeInfo) { l.callDepth += info.CallDepth } -func (l klogger) Info(level int, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) +func (l *klogger) Info(level int, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) + // Skip this function. + VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } -func (l klogger) Enabled(level int) bool { - return V(Level(level)).Enabled() +func (l *klogger) Enabled(level int) bool { + // Skip this function and logr.Logger.Info where Enabled is called. + return VDepth(l.callDepth+2, Level(level)).Enabled() } -func (l klogger) Error(err error, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) +func (l *klogger) Error(err error, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) + ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go new file mode 100644 index 000000000..76415b783 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -0,0 +1,312 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides a cache mechanism based on etags to lazily +// build, and/or cache results from expensive operation such that those +// operations are not repeated unnecessarily. 
The operations can be +// created as a tree, and replaced dynamically as needed. +// +// All the operations in this module are thread-safe. +// +// # Dependencies and types of caches +// +// This package uses a source/transform/sink model of caches to build +// the dependency tree, and can be used as follows: +// - [NewSource]: A source cache that recomputes the content every time. +// - [NewStaticSource]: A source cache that always produces the +// same content; it is only called once. +// - [NewTransformer]: A cache that transforms data from one format to +// another. It's only refreshed when the source changes. +// - [NewMerger]: A cache that aggregates multiple caches into one. +// It's only refreshed when the source changes. +// - [Replaceable]: A cache adapter that can be atomically +// replaced with a new one, and saves the previous results in case an +// error pops up. +// +// # Etags +// +// Etags in this library are cache version identifiers. They don't +// necessarily strictly match the semantics of http `etags`, but they are +// somewhat inspired by them and follow the same principles. +// Hashing the content is a good way to guarantee that your function is +// never going to be called spuriously. In the Kubernetes world, this could +// be a `resourceVersion`, an actual etag, a hash, a UUID +// (if the cache always changes), or even a made-up string when the +// content of the cache never changes. +package cached + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// Result is the content returned from a call to a cache. It can either +// be created with [NewResultOK] if the call was a success, or +// [NewResultErr] if the call resulted in an error. +type Result[T any] struct { + Data T + Etag string + Err error +} + +// NewResultOK creates a new [Result] for a successful operation. +func NewResultOK[T any](data T, etag string) Result[T] { + return Result[T]{ + Data: data, + Etag: etag, + } +} + +// NewResultErr creates a new [Result] when an error has happened. +func NewResultErr[T any](err error) Result[T] { + return Result[T]{ + Err: err, + } +} + +// Result can be treated as a [Data] if necessary. +func (r Result[T]) Get() Result[T] { + return r +} + +// Data is a cache that performs an action whose result data will be +// cached. It also returns an "etag" identifier to version the cache, so +// that the caller can know if they have the most recent version of the +// cache (and can decide to cache some operation based on that). +// +// The [NewMerger] and [NewTransformer] automatically handle +// that for you by checking if the etag is updated before calling the +// merging or transforming function. +type Data[T any] interface { + // Returns the cached data, as well as an "etag" to identify the + // version of the cache, or an error if something happened. + Get() Result[T] +} + +// NewMerger creates a new merge cache, a cache that merges the result +// of other caches. The function only gets called if any of the +// dependencies has changed. +// +// If any of the dependencies returned an error before, or any of them +// returns an error this time, or if the mergeFn failed +// before, then the function is rerun. +// +// The caches and results are mapped by K so that associated data can be +// retrieved. The map of dependencies cannot be modified after +// creation, and a new merger should be created (and probably replaced +// using a [Replaceable]).
+// +// Note that this assumes there is no "partial" merge: the merge +// function will remerge all the dependencies together every time. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. +// +// Also note that Golang map iteration is not stable. If the mergeFn +// depends on the iteration order being stable, it will need to +// implement its own sorting or iteration order. +func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { + listCaches := make([]Data[T], 0, len(caches)) + // maps from index to key + indexes := make(map[int]K, len(caches)) + i := 0 + for k := range caches { + listCaches = append(listCaches, caches[k]) + indexes[i] = k + i++ + } + + return NewListMerger(func(results []Result[T]) Result[V] { + if len(results) != len(indexes) { + panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) + } + m := make(map[K]Result[T], len(results)) + for i := range results { + m[indexes[i]] = results[i] + } + return mergeFn(m) + }, listCaches) +} + +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) Result[V] + caches []Data[T] + cacheResults []Result[T] + result Result[V] +} + +// NewListMerger creates a new merge cache that merges the results of +// other caches in list form. The function only gets called if any of +// the dependencies has changed. +// +// The benefit of ListMerger over the basic Merger is that caches are +// stored in an ordered list so the order of the caches will be +// preserved in the order of the results passed to the mergeFn. +// +// If any of the dependencies returned an error before, or any of them +// returns an error this time, or if the mergeFn failed +// before, then the function is rerun. +// +// Note that this assumes there is no "partial" merge: the merge +// function will remerge all the dependencies together every time. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either.
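// A minimal sketch (not part of the vendored file) of the source/transformer chain
// described above: a source whose etag identifies its content and a transformer that
// only reruns when that etag changes. The data, etag format and package main wrapper
// are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	// Source: recomputed on every Get; the etag versions the content.
	src := cached.NewSource[string](func() cached.Result[string] {
		data := "hello"
		return cached.NewResultOK(data, fmt.Sprintf("len=%d", len(data)))
	})

	// Transformer: the transform function runs only when the source etag changes.
	upper := cached.NewTransformer[string, string](func(r cached.Result[string]) cached.Result[string] {
		if r.Err != nil {
			return cached.NewResultErr[string](r.Err)
		}
		return cached.NewResultOK(strings.ToUpper(r.Data), r.Etag)
	}, src)

	res := upper.Get()
	fmt.Println(res.Data, res.Etag, res.Err) // HELLO len=5 <nil>
}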
+func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] { + return &listMerger[T, V]{ + mergeFn: mergeFn, + caches: caches, + } +} + +func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { + cacheResults := make([]Result[T], len(c.caches)) + ch := make(chan struct { + int + Result[T] + }, len(c.caches)) + for i := range c.caches { + go func(index int) { + ch <- struct { + int + Result[T] + }{ + index, + c.caches[index].Get(), + } + }(i) + } + for i := 0; i < len(c.caches); i++ { + res := <-ch + cacheResults[res.int] = res.Result + } + return cacheResults +} + +func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { + if c.cacheResults == nil { + return true + } + if c.result.Err != nil { + return true + } + if len(results) != len(c.cacheResults) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) + } + for i, oldResult := range c.cacheResults { + newResult := results[i] + if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { + return true + } + } + return false +} + +func (c *listMerger[T, V]) Get() Result[V] { + c.lock.Lock() + defer c.lock.Unlock() + cacheResults := c.prepareResultsLocked() + if c.needsRunningLocked(cacheResults) { + c.cacheResults = cacheResults + c.result = c.mergeFn(c.cacheResults) + } + return c.result +} + +// NewTransformer creates a new cache that transforms the result of +// another cache. The transformFn will only be called if the source +// cache has updated the output, otherwise, the cached result will be +// returned. +// +// If the dependency returned an error before, or it returns an error +// this time, or if the transformerFn failed before, the function is +// reran. +func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { + return NewListMerger(func(caches []Result[T]) Result[V] { + if len(caches) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) + } + return transformerFn(caches[0]) + }, []Data[T]{source}) +} + +// NewSource creates a new cache that generates some data. This +// will always be called since we don't know the origin of the data and +// if it needs to be updated or not. sourceFn MUST be thread-safe. +func NewSource[T any](sourceFn func() Result[T]) Data[T] { + c := source[T](sourceFn) + return &c +} + +type source[T any] func() Result[T] + +func (c *source[T]) Get() Result[T] { + return (*c)() +} + +// NewStaticSource creates a new cache that always generates the +// same data. This will only be called once (lazily). +func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { + return &static[T]{ + fn: staticFn, + } +} + +type static[T any] struct { + once sync.Once + fn func() Result[T] + result Result[T] +} + +func (c *static[T]) Get() Result[T] { + c.once.Do(func() { + c.result = c.fn() + }) + return c.result +} + +// Replaceable is a cache that carries the result even when the cache is +// replaced. This is the type that should typically be stored in +// structs. +type Replaceable[T any] struct { + cache atomic.Pointer[Data[T]] + result atomic.Pointer[Result[T]] +} + +// Get retrieves the data from the underlying source. [Replaceable] +// implements the [Data] interface itself. This is a pass-through +// that calls the most recent underlying cache. If the cache fails but +// previously had returned a success, that success will be returned +// instead. 
If the cache fails but we never returned a success, that +// failure is returned. +func (c *Replaceable[T]) Get() Result[T] { + result := (*c.cache.Load()).Get() + + for { + cResult := c.result.Load() + if result.Err != nil && cResult != nil && cResult.Err == nil { + return *cResult + } + if c.result.CompareAndSwap(cResult, &result) { + return result + } + } +} + +// Replace changes the cache. +func (c *Replaceable[T]) Replace(cache Data[T]) { + c.cache.Swap(&cache) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index c84ca7031..1a6c12e17 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -20,7 +20,7 @@ import ( "net/http" "strings" - "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful/v3" "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" @@ -246,38 +246,42 @@ var schemaTypeFormatMap = map[string]typeInfo{ // the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple // type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. // Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } +// +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // // IntOrString documentation... // type IntOrString { ... } // // Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } +// +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } // // Implement OpenAPIDefinitionGetter for IntOrString: // -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// // ... // definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." // new -// } -// } // +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } func OpenAPITypeFormat(typeName string) (string, string) { mapped, ok := schemaTypeFormatMap[typeName] if !ok { diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index ec4adcdec..2263e2f32 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -21,35 +21,28 @@ import ( "crypto/sha512" "encoding/json" "fmt" - "mime" "net/http" "net/url" "path" - "sort" "strconv" "strings" "sync" "time" "github.com/golang/protobuf/proto" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" + "github.com/google/uuid" "github.com/munnerz/goautoneg" + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/internal/handler" "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" ) const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. 
- mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" - mimePbGz = "application/x-gzip" - - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" - subTypeJSON = "json" + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf" + subTypeJSON = "json" ) // OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 @@ -65,29 +58,63 @@ type OpenAPIV3DiscoveryGroupVersion struct { ServerRelativeURL string `json:"serverRelativeURL"` } -// OpenAPIService is the service responsible for serving OpenAPI spec. It has -// the ability to safely change the spec while serving it. -type OpenAPIService struct { - // rwMutex protects All members of this service. - rwMutex sync.RWMutex +func ToV3ProtoBinary(json []byte) ([]byte, error) { + document, err := openapi_v3.ParseDocument(json) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +type timedSpec struct { + spec []byte lastModified time.Time - v3Schema map[string]*OpenAPIV3Group } -type OpenAPIV3Group struct { - rwMutex sync.RWMutex +// This type is protected by the lock on OpenAPIService. +type openAPIV3Group struct { + specCache cached.Replaceable[*spec3.OpenAPI] + pbCache cached.Data[timedSpec] + jsonCache cached.Data[timedSpec] +} - lastModified time.Time +func newOpenAPIV3Group() *openAPIV3Group { + o := &openAPIV3Group{} + o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + json, err := json.Marshal(result.Data) + if err != nil { + return cached.NewResultErr[timedSpec](err) + } + return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + }, &o.specCache) + o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + proto, err := ToV3ProtoBinary(result.Data.spec) + if err != nil { + return cached.NewResultErr[timedSpec](err) + } + return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + }, o.jsonCache) + return o +} - pbCache handler.HandlerCache - jsonCache handler.HandlerCache - etagCache handler.HandlerCache +func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { + o.specCache.Replace(openapi) } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // Mutex protects the schema map. + mutex sync.Mutex + v3Schema map[string]*openAPIV3Group + + discoveryCache cached.Replaceable[timedSpec] } func computeETag(data []byte) string { @@ -106,92 +133,90 @@ func constructServerRelativeURL(gvString, etag string) string { } // NewOpenAPIService builds an OpenAPIService starting with the given spec. -func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { +func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} - o.v3Schema = make(map[string]*OpenAPIV3Group) - return o, nil + o.v3Schema = make(map[string]*openAPIV3Group) + // We're not locked because we haven't shared the structure yet. 
+ o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + return o } -func (o *OpenAPIService) getGroupBytes() ([]byte, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - keys := make([]string, len(o.v3Schema)) - i := 0 - for k := range o.v3Schema { - keys[i] = k - i++ +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { + caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) + for gvName, group := range o.v3Schema { + caches[gvName] = group.jsonCache } - - sort.Strings(keys) - discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} - for gvString, groupVersion := range o.v3Schema { - etagBytes, err := groupVersion.etagCache.Get() - if err != nil { - return nil, err + return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { + discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} + for gvName, result := range results { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ + ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), + } } - discovery.Paths[gvString] = OpenAPIV3DiscoveryGroupVersion{ - ServerRelativeURL: constructServerRelativeURL(gvString, string(etagBytes)), + j, err := json.Marshal(discovery) + if err != nil { + return cached.NewResultErr[timedSpec](err) } - } - j, err := json.Marshal(discovery) - if err != nil { - return nil, err - } - return j, nil + return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) + }, caches) } func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() + o.mutex.Lock() + defer o.mutex.Unlock() v, ok := o.v3Schema[group] if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - if getType == subTypeJSON { - specBytes, err := v.jsonCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specBytes, string(etagBytes), v.lastModified, err - } else if getType == subTypeProtobuf { - specPb, err := v.pbCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specPb, string(etagBytes), v.lastModified, err + result := cached.Result[timedSpec]{} + switch getType { + case subTypeJSON: + result = v.jsonCache.Get() + case subTypeProtobuf, subTypeProtobufDeprecated: + result = v.pbCache.Get() + default: + return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) + return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } -func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - +// UpdateGroupVersionLazy adds or updates an existing group with the new cached. +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { + o.mutex.Lock() + defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { - o.v3Schema[group] = &OpenAPIV3Group{} + o.v3Schema[group] = newOpenAPIV3Group() + // Since there is a new item, we need to re-build the cache map. 
+ o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } - return o.v3Schema[group].UpdateSpec(openapi) + o.v3Schema[group].UpdateSpec(openapi) } -func (o *OpenAPIService) DeleteGroupVersion(group string) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - delete(o.v3Schema, group) +func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { + o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) } -func ToV3ProtoBinary(json []byte) ([]byte, error) { - document, err := openapi_v3.ParseDocument(json) - if err != nil { - return nil, err - } - return proto.Marshal(document) +func (o *OpenAPIService) DeleteGroupVersion(group string) { + o.mutex.Lock() + defer o.mutex.Unlock() + delete(o.v3Schema, group) + // Rebuild the merge cache map since the items have changed. + o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - data, _ := o.getGroupBytes() - http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) + result := o.discoveryCache.Get() + if result.Err != nil { + klog.Errorf("Error serving discovery: %s", result.Err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Etag", strconv.Quote(result.Etag)) + w.Header().Set("Content-Type", "application/json") + http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { @@ -210,11 +235,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque } accepted := []struct { - Type string - SubType string + Type string + SubType string + ReturnedContentType string }{ - {"application", subTypeJSON}, - {"application", subTypeProtobuf}, + {"application", subTypeJSON, "application/" + subTypeJSON}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf}, } for _, clause := range clauses { @@ -229,6 +256,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque if err != nil { return } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag w.Header().Set("Etag", strconv.Quote(etag)) @@ -262,30 +292,3 @@ func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, han handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) return nil } - -func (o *OpenAPIV3Group) UpdateSpec(openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - - o.jsonCache = o.jsonCache.New(func() ([]byte, error) { - return json.Marshal(openapi) - }) - o.pbCache = o.pbCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return ToV3ProtoBinary(json) - }) - // TODO: This forces a json marshal of corresponding group-versions. - // We should look to replace this with a faster hashing mechanism. 
- o.etagCache = o.etagCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return []byte(computeETag(json)), nil - }) - o.lastModified = time.Now() - return nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go new file mode 100644 index 000000000..bef603782 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +// Used by tests to selectively disable experimental JSON unmarshaler +var UseOptimizedJSONUnmarshaling bool = true +var UseOptimizedJSONUnmarshalingV3 bool = true + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go b/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go deleted file mode 100644 index e128c26eb..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package handler - -import ( - "sync" -) - -// HandlerCache represents a lazy cache for generating a byte array -// It is used to lazily marshal OpenAPI v2/v3 and lazily generate the ETag -type HandlerCache struct { - BuildCache func() ([]byte, error) - once sync.Once - bytes []byte - err error -} - -// Get either returns the cached value or calls BuildCache() once before caching and returning -// its results. If BuildCache returns an error, the last valid value for the cache (from prior -// calls to New()) is used instead if possible. -func (c *HandlerCache) Get() ([]byte, error) { - c.once.Do(func() { - bytes, err := c.BuildCache() - // if there is an error updating the cache, there can be situations where - // c.bytes contains a valid value (carried over from the previous update) - // but c.err is also not nil; the cache user is expected to check for this - c.err = err - if c.err == nil { - // don't override previous spec if we had an error - c.bytes = bytes - } - }) - return c.bytes, c.err -} - -// New creates a new HandlerCache for situations where a cache refresh is needed. -// This function is not thread-safe and should not be called at the same time as Get(). 
-func (c *HandlerCache) New(cacheBuilder func() ([]byte, error)) HandlerCache { - return HandlerCache{ - bytes: c.bytes, - BuildCache: cacheBuilder, - } -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 000000000..7393bacf7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. +func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS similarity index 74% rename from vendor/golang.org/x/time/AUTHORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS index 15167cd74..2b00ddba0 100644 --- a/vendor/golang.org/x/time/AUTHORS +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS @@ -1,3 +1,3 @@ # This source code refers to The Go Authors for copyright purposes. # The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. +# visible at https://tip.golang.org/AUTHORS. 
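As a quick illustration of the new helpers added above in `pkg/internal/serialization.go`, here is a minimal standalone sketch that inlines the same `IsExtensionKey`/`SanitizeExtensions` logic (the real functions live in an internal package of `k8s.io/kube-openapi` and cannot be imported from outside that module, so the logic is copied here purely for illustration):

```go
package main

import "fmt"

// isExtensionKey mirrors internal.IsExtensionKey above:
// true only for keys of the form x-* or X-*.
func isExtensionKey(k string) bool {
	return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-'
}

// sanitizeExtensions mirrors internal.SanitizeExtensions above: it deletes
// non-extension keys in place and returns nil once the map is empty.
func sanitizeExtensions(e map[string]interface{}) map[string]interface{} {
	for k := range e {
		if !isExtensionKey(k) {
			delete(e, k)
		}
	}
	if len(e) == 0 {
		e = nil
	}
	return e
}

func main() {
	ext := map[string]interface{}{
		"x-kubernetes-group-version-kind": "kept",
		"description":                     "dropped: not an x-*/X-* key",
	}
	fmt.Println(sanitizeExtensions(ext)) // map[x-kubernetes-group-version-kind:kept]

	// A map with no extension keys at all comes back as nil.
	fmt.Println(sanitizeExtensions(map[string]interface{}{"title": "t"}) == nil) // true
}
```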
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS similarity index 70% rename from vendor/golang.org/x/time/CONTRIBUTORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS index 1c4577e96..1fbd3e976 100644 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS @@ -1,3 +1,3 @@ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE similarity index 96% rename from vendor/github.com/PuerkitoBio/urlesc/LICENSE rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE index 744875676..244127300 100644 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2020 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md new file mode 100644 index 000000000..0349adf69 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md @@ -0,0 +1,321 @@ +# JSON Serialization (v2) + +[![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)](https://pkg.go.dev/github.com/go-json-experiment/json) +[![Build Status](https://github.com/go-json-experiment/json/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/go-json-experiment/json/actions) + +This module hosts an experimental implementation of v2 `encoding/json`. +The API is unstable and breaking changes will regularly be made. +Do not depend on this in publicly available modules. + +## Goals and objectives + +* **Mostly backwards compatible:** If possible, v2 should aim to be _mostly_ +compatible with v1 in terms of both API and default behavior to ease migration. +For example, the `Marshal` and `Unmarshal` functions are the most widely used +declarations in the v1 package. It seems sensible for equivalent functionality +in v2 to be named the same and have the same signature. +Behaviorally, we should aim for 95% to 99% backwards compatibility. +We do not aim for 100% compatibility since we want the freedom to break +certain behaviors that are now considered to have been a mistake. +We may provide options that can bring the v2 implementation to 100% compatibility, +but it will not be the default. + +* **More flexible:** There is a +[long list of feature requests](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+encoding%2Fjson+in%3Atitle). +We should aim to provide the most flexible features that addresses most usages. +We do not want to over fit the v2 API to handle every possible use case. 
+Ideally, the features provided should be orthogonal in nature such that +any combination of features results in as few surprising edge cases as possible. + +* **More performant:** JSON serialization is widely used and any bit of extra +performance gains will be greatly appreciated. Some rarely used behaviors of v1 +may be dropped in favor of better performance. For example, +despite `Encoder` and `Decoder` operating on an `io.Writer` and `io.Reader`, +they do not operate in a truly streaming manner, +leading to a loss in performance. The v2 implementation should aim to be truly +streaming by default (see [#33714](https://golang.org/issue/33714)). + +* **Easy to use (hard to misuse):** The v2 API should aim to make +the common case easy and the less common case at least possible. +The API should avoid behavior that goes contrary to user expectation, +which may result in subtle bugs (see [#36225](https://golang.org/issue/36225)). + +* **v1 and v2 maintainability:** Since the v1 implementation must stay forever, +it would be beneficial if v1 could be implemented under the hood with v2, +allowing for less maintenance burden in the future. This probably implies that +behavioral changes in v2 relative to v1 need to be exposed as options. + +* **Avoid unsafe:** Standard library packages generally avoid the use of +package `unsafe` even if it could provide a performance boost. +We aim to preserve this property. + +## Expectations + +While this module aims to possibly be the v2 implementation of `encoding/json`, +there is no guarantee that this outcome will occur. As with any major change +to the Go standard library, this will eventually go through the +[Go proposal process](https://github.com/golang/proposal#readme). +At the present moment, this is still in the design and experimentation phase +and is not ready for a formal proposal. + +There are several possible outcomes from this experiment: +1. We determine that a v2 `encoding/json` would not provide sufficient benefit +over the existing v1 `encoding/json` package. Thus, we abandon this effort. +2. We propose a v2 `encoding/json` design, but it is rejected in favor of some +other design that is considered superior. +3. We propose a v2 `encoding/json` design, but rather than adding an entirely +new v2 `encoding/json` package, we decide to merge its functionality into +the existing v1 `encoding/json` package. +4. We propose a v2 `encoding/json` design and it is accepted, resulting in +its addition to the standard library. +5. Some other unforeseen outcome (among the infinite number of possibilities). + +## Development + +This module is primarily developed by +[@dsnet](https://github.com/dsnet), +[@mvdan](https://github.com/mvdan), and +[@johanbrandhorst](https://github.com/johanbrandhorst) +with feedback provided by +[@rogpeppe](https://github.com/rogpeppe), +[@ChrisHines](https://github.com/ChrisHines), and +[@rsc](https://github.com/rsc). + +Discussion about semantics occur semi-regularly, where a +[record of past meetings can be found here](https://docs.google.com/document/d/1rovrOTd-wTawGMPPlPuKhwXaYBg9VszTXR9AQQL5LfI/edit?usp=sharing). + +## Design overview + +This package aims to provide a clean separation between syntax and semantics. 
+Syntax deals with the structural representation of JSON (as specified in +[RFC 4627](https://tools.ietf.org/html/rfc4627), +[RFC 7159](https://tools.ietf.org/html/rfc7159), +[RFC 7493](https://tools.ietf.org/html/rfc7493), +[RFC 8259](https://tools.ietf.org/html/rfc8259), and +[RFC 8785](https://tools.ietf.org/html/rfc8785)). +Semantics deals with the meaning of syntactic data as usable application data. + +The `Encoder` and `Decoder` types are streaming tokenizers concerned with the +packing or parsing of JSON data. They operate on `Token` and `RawValue` types +which represent the common data structures that are representable in JSON. +`Encoder` and `Decoder` do not aim to provide any interpretation of the data. + +Functions like `Marshal`, `MarshalFull`, `MarshalNext`, `Unmarshal`, +`UnmarshalFull`, and `UnmarshalNext` provide semantic meaning by correlating +any arbitrary Go type with some JSON representation of that type (as stored in +data types like `[]byte`, `io.Writer`, `io.Reader`, `Encoder`, or `Decoder`). + +![API overview](api.png) + +This diagram provides a high-level overview of the v2 `json` package. +Purple blocks represent types, while blue blocks represent functions or methods. +The arrows and their direction represent the approximate flow of data. +The bottom half of the diagram contains functionality that is only concerned +with syntax, while the upper half contains functionality that assigns +semantic meaning to syntactic data handled by the bottom half. + +In contrast to v1 `encoding/json`, options are represented as separate types +rather than being setter methods on the `Encoder` or `Decoder` types. + +## Behavior changes + +The v2 `json` package changes the default behavior of `Marshal` and `Unmarshal` +relative to the v1 `json` package to be more sensible. +Some of these behavior changes have options and workarounds to opt into +behavior similar to what v1 provided. + +This table shows an overview of the changes: + +| v1 | v2 | Details | +| -- | -- | ------- | +| JSON object members are unmarshaled into a Go struct using a **case-insensitive name match**. | JSON object members are unmarshaled into a Go struct using a **case-sensitive name match**. | [CaseSensitivity](/diff_test.go#:~:text=TestCaseSensitivity) | +| When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value is an empty Go value**, which is defined as false, 0, a nil pointer, a nil interface value, and any empty array, slice, map, or string. | When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value would encode as an empty JSON value**, which is defined as a JSON null, or an empty JSON string, object, or array. | [OmitEmptyOption](/diff_test.go#:~:text=TestOmitEmptyOption) | +| The `string` option **does affect** Go bools. | The `string` option **does not affect** Go bools. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **does not recursively affect** sub-values of the Go field value. | The `string` option **does recursively affect** sub-values of the Go field value. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **sometimes accepts** a JSON null escaped within a JSON string. | The `string` option **never accepts** a JSON null escaped within a JSON string. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| A nil Go slice is marshaled as a **JSON null**. | A nil Go slice is marshaled as an **empty JSON array**. 
| [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A nil Go map is marshaled as a **JSON null**. | A nil Go map is marshaled as an **empty JSON object**. | [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A Go array may be unmarshaled from a **JSON array of any length**. | A Go array must be unmarshaled from a **JSON array of the same length**. | [Arrays](/diff_test.go#:~:text=Arrays) | +| A Go byte array is represented as a **JSON array of JSON numbers**. | A Go byte array is represented as a **Base64-encoded JSON string**. | [ByteArrays](/diff_test.go#:~:text=TestByteArrays) | +| `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **inconsistently called**. | `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **consistently called**. | [PointerReceiver](/diff_test.go#:~:text=TestPointerReceiver) | +| A Go map is marshaled in a **deterministic order**. | A Go map is marshaled in a **non-deterministic order**. | [MapDeterminism](/diff_test.go#:~:text=TestMapDeterminism) | +| JSON strings are encoded **with HTML-specific characters being escaped**. | JSON strings are encoded **without any characters being escaped** (unless necessary). | [EscapeHTML](/diff_test.go#:~:text=TestEscapeHTML) | +| When marshaling, invalid UTF-8 within a Go string **are silently replaced**. | When marshaling, invalid UTF-8 within a Go string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When unmarshaling, invalid UTF-8 within a JSON string **are silently replaced**. | When unmarshaling, invalid UTF-8 within a JSON string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When marshaling, **an error does not occur** if the output JSON value contains objects with duplicate names. | When marshaling, **an error does occur** if the output JSON value contains objects with duplicate names. | [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| When unmarshaling, **an error does not occur** if the input JSON value contains objects with duplicate names. | When unmarshaling, **an error does occur** if the input JSON value contains objects with duplicate names. | [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| Unmarshaling a JSON null into a non-empty Go value **inconsistently clears the value or does nothing**. | Unmarshaling a JSON null into a non-empty Go value **always clears the value**. | [MergeNull](/diff_test.go#:~:text=TestMergeNull) | +| Unmarshaling a JSON value into a non-empty Go value **follows inconsistent and bizarre behavior**. | Unmarshaling a JSON value into a non-empty Go value **always merges if the input is an object, and otherwise replaces**. | [MergeComposite](/diff_test.go#:~:text=TestMergeComposite) | +| A `time.Duration` is represented as a **JSON number containing the decimal number of nanoseconds**. | A `time.Duration` is represented as a **JSON string containing the formatted duration (e.g., "1h2m3.456s")**. | [TimeDurations](/diff_test.go#:~:text=TestTimeDurations) | +| Unmarshaling a JSON number into a Go float beyond its representation **results in an error**. | Unmarshaling a JSON number into a Go float beyond its representation **uses the closest representable value (e.g., ±`math.MaxFloat`)**. | [MaxFloats](/diff_test.go#:~:text=TestMaxFloats) | +| A Go struct with only unexported fields **can be serialized**. | A Go struct with only unexported fields **cannot be serialized**. 
| [EmptyStructs](/diff_test.go#:~:text=TestEmptyStructs) | +| A Go struct that embeds an unexported struct type **can sometimes be serialized**. | A Go struct that embeds an unexported struct type **cannot be serialized**. | [EmbedUnexported](/diff_test.go#:~:text=TestEmbedUnexported) | + +See [diff_test.go](/diff_test.go) for details about every change. + +## Performance + +One of the goals of the v2 module is to be more performant than v1. + +Each of the charts below show the performance across +several different JSON implementations: + +* `JSONv1` is `encoding/json` at `v1.18.2` +* `JSONv2` is `github.com/go-json-experiment/json` at `v0.0.0-20220524042235-dd8be80fc4a7` +* `JSONIterator` is `github.com/json-iterator/go` at `v1.1.12` +* `SegmentJSON` is `github.com/segmentio/encoding/json` at `v0.3.5` +* `GoJSON` is `github.com/goccy/go-json` at `v0.9.7` +* `SonicJSON` is `github.com/bytedance/sonic` at `v1.3.0` + +Benchmarks were run across various datasets: + +* `CanadaGeometry` is a GeoJSON (RFC 7946) representation of Canada. + It contains many JSON arrays of arrays of two-element arrays of numbers. +* `CITMCatalog` contains many JSON objects using numeric names. +* `SyntheaFHIR` is sample JSON data from the healthcare industry. + It contains many nested JSON objects with mostly string values, + where the set of unique string values is relatively small. +* `TwitterStatus` is the JSON response from the Twitter API. + It contains a mix of all different JSON kinds, where string values + are a mix of both single-byte ASCII and multi-byte Unicode. +* `GolangSource` is a simple tree representing the Go source code. + It contains many nested JSON objects, each with the same schema. +* `StringUnicode` contains many strings with multi-byte Unicode runes. + +All of the implementations other than `JSONv1` and `JSONv2` make +extensive use of `unsafe`. As such, we expect those to generally be faster, +but at the cost of memory and type safety. `SonicJSON` goes a step even further +and uses just-in-time compilation to generate machine code specialized +for the Go type being marshaled or unmarshaled. +Also, `SonicJSON` does not validate JSON strings for valid UTF-8, +and so gains a notable performance boost on datasets with multi-byte Unicode. +Benchmarks are performed based on the default marshal and unmarshal behavior +of each package. Note that `JSONv2` aims to be safe and correct by default, +which may not be the most performant strategy. + +`JSONv2` has several semantic changes relative to `JSONv1` that +impacts performance: + +1. When marshaling, `JSONv2` no longer sorts the keys of a Go map. + This will improve performance. +2. When marshaling or unmarshaling, `JSONv2` always checks + to make sure JSON object names are unique. + This will hurt performance, but is more correct. +3. When marshaling or unmarshaling, `JSONv2` always + shallow copies the underlying value for a Go interface and + shallow copies the key and value for entries in a Go map. + This is done to keep the value as addressable so that `JSONv2` can + call methods and functions that operate on a pointer receiver. + This will hurt performance, but is more correct. + +All of the charts are unit-less since the values are normalized +relative to `JSONv1`, which is why `JSONv1` always has a value of 1. +A lower value is better (i.e., runs faster). + +Benchmarks were performed on an AMD Ryzen 9 5900X. + +The code for the benchmarks is located at +https://github.com/go-json-experiment/jsonbench. 
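To make the v1-versus-v2 defaults in the behavior table above concrete, here is a small sketch of one of the listed changes, the handling of nil slices and maps. It assumes the experimental module is fetched from the `github.com/go-json-experiment/json` path shown in the badges above; the copy vendored in this patch sits under an internal package and is not importable directly.

```go
package main

import (
	jsonv1 "encoding/json"
	"fmt"

	jsonv2 "github.com/go-json-experiment/json" // path taken from the README badges above
)

func main() {
	var s []int          // nil slice
	var m map[string]int // nil map

	v1s, _ := jsonv1.Marshal(s)
	v1m, _ := jsonv1.Marshal(m)
	fmt.Println(string(v1s), string(v1m)) // v1: null null

	v2s, _ := jsonv2.Marshal(s)
	v2m, _ := jsonv2.Marshal(m)
	fmt.Println(string(v2s), string(v2m)) // v2, per the table above: [] {}
}
```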
+ +### Marshal Performance + +#### Concrete types + +![Benchmark Marshal Concrete](benchmark-marshal-concrete.png) + +* This compares marshal performance when serializing + [from concrete types](/testdata_test.go). +* The `JSONv1` implementation is close to optimal (without the use of `unsafe`). +* Relative to `JSONv1`, `JSONv2` is generally as fast or slightly faster. +* Relative to `JSONIterator`, `JSONv2` is up to 1.3x faster. +* Relative to `SegmentJSON`, `JSONv2` is up to 1.8x slower. +* Relative to `GoJSON`, `JSONv2` is up to 2.0x slower. +* Relative to `SonicJSON`, `JSONv2` is about 1.8x to 3.2x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* For `JSONv1` and `JSONv2`, marshaling from concrete types is + mostly limited by the performance of Go reflection. + +#### Interface types + +![Benchmark Marshal Interface](benchmark-marshal-interface.png) + +* This compares marshal performance when serializing from + `any`, `map[string]any`, and `[]any` types. +* Relative to `JSONv1`, `JSONv2` is about 1.5x to 4.2x faster. +* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 2.4x faster. +* Relative to `SegmentJSON`, `JSONv2` is about 1.2x to 1.8x faster. +* Relative to `GoJSON`, `JSONv2` is about 1.1x to 2.5x faster. +* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* `JSONv2` is faster than the alternatives. + One advantange is because it does not sort the keys for a `map[string]any`, + while alternatives (except `SonicJSON` and `JSONIterator`) do sort the keys. + +#### RawValue types + +![Benchmark Marshal Rawvalue](benchmark-marshal-rawvalue.png) + +* This compares performance when marshaling from a `json.RawValue`. + This mostly exercises the underlying encoder and + hides the cost of Go reflection. +* Relative to `JSONv1`, `JSONv2` is about 3.5x to 7.8x faster. +* `JSONIterator` is blazingly fast because + [it does not validate whether the raw value is valid](https://go.dev/play/p/bun9IXQCKRe) + and simply copies it to the output. +* Relative to `SegmentJSON`, `JSONv2` is about 1.5x to 2.7x faster. +* Relative to `GoJSON`, `JSONv2` is up to 2.2x faster. +* Relative to `SonicJSON`, `JSONv2` is up to 1.5x faster. +* Aside from `JSONIterator`, `JSONv2` is generally the fastest. + +### Unmarshal Performance + +#### Concrete types + +![Benchmark Unmarshal Concrete](benchmark-unmarshal-concrete.png) + +* This compares unmarshal performance when deserializing + [into concrete types](/testdata_test.go). +* Relative to `JSONv1`, `JSONv2` is about 1.8x to 5.7x faster. +* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 1.6x slower. +* Relative to `SegmentJSON`, `JSONv2` is up to 2.5x slower. +* Relative to `GoJSON`, `JSONv2` is about 1.4x to 2.1x slower. +* Relative to `SonicJSON`, `JSONv2` is up to 4.0x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* For `JSONv1` and `JSONv2`, unmarshaling into concrete types is + mostly limited by the performance of Go reflection. + +#### Interface types + +![Benchmark Unmarshal Interface](benchmark-unmarshal-interface.png) + +* This compares unmarshal performance when deserializing into + `any`, `map[string]any`, and `[]any` types. +* Relative to `JSONv1`, `JSONv2` is about 1.tx to 4.3x faster. +* Relative to `JSONIterator`, `JSONv2` is up to 1.5x faster. +* Relative to `SegmentJSON`, `JSONv2` is about 1.5 to 3.7x faster. +* Relative to `GoJSON`, `JSONv2` is up to 1.3x faster. 
+* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* Aside from `SonicJSON`, `JSONv2` is generally just as fast + or faster than all the alternatives. + +#### RawValue types + +![Benchmark Unmarshal Rawvalue](benchmark-unmarshal-rawvalue.png) + +* This compares performance when unmarshaling into a `json.RawValue`. + This mostly exercises the underlying decoder and + hides away most of the cost of Go reflection. +* Relative to `JSONv1`, `JSONv2` is about 8.3x to 17.0x faster. +* Relative to `JSONIterator`, `JSONv2` is up to 2.0x faster. +* Relative to `SegmentJSON`, `JSONv2` is up to 1.6x faster or 1.7x slower. +* Relative to `GoJSON`, `JSONv2` is up to 1.9x faster or 2.1x slower. +* Relative to `SonicJSON`, `JSONv2` is up to 2.0x faster + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* `JSONv1` takes a + [lexical scanning approach](https://talks.golang.org/2011/lex.slide#1), + which performs a virtual function call for every byte of input. + In contrast, `JSONv2` makes heavy use of iterative and linear parsing logic + (with extra complexity to resume parsing when encountering segmented buffers). +* `JSONv2` is comparable to the alternatives that use `unsafe`. + Generally it is faster, but sometimes it is slower. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go new file mode 100644 index 000000000..e6c6216ff --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go @@ -0,0 +1,513 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "io" + "reflect" + "sync" +) + +// MarshalOptions configures how Go data is serialized as JSON data. +// The zero value is equivalent to the default marshal settings. +type MarshalOptions struct { + requireKeyedLiterals + nonComparable + + // Marshalers is a list of type-specific marshalers to use. + Marshalers *Marshalers + + // StringifyNumbers specifies that numeric Go types should be serialized + // as a JSON string containing the equivalent JSON number value. + // + // According to RFC 8259, section 6, a JSON implementation may choose to + // limit the representation of a JSON number to an IEEE 754 binary64 value. + // This may cause decoders to lose precision for int64 and uint64 types. + // Escaping JSON numbers as a JSON string preserves the exact precision. + StringifyNumbers bool + + // DiscardUnknownMembers specifies that marshaling should ignore any + // JSON object members stored in Go struct fields dedicated to storing + // unknown JSON object members. + DiscardUnknownMembers bool + + // Deterministic specifies that the same input value will be serialized + // as the exact same output bytes. Different processes of + // the same program will serialize equal values to the same bytes, + // but different versions of the same program are not guaranteed + // to produce the exact same sequence of bytes. + Deterministic bool + + // formatDepth is the depth at which we respect the format flag. + formatDepth int + // format is custom formatting for the value at the specified depth. + format string +} + +// Marshal serializes a Go value as a []byte with default options. +// It is a thin wrapper over MarshalOptions.Marshal. 
+func Marshal(in any) (out []byte, err error) { + return MarshalOptions{}.Marshal(EncodeOptions{}, in) +} + +// MarshalFull serializes a Go value into an io.Writer with default options. +// It is a thin wrapper over MarshalOptions.MarshalFull. +func MarshalFull(out io.Writer, in any) error { + return MarshalOptions{}.MarshalFull(EncodeOptions{}, out, in) +} + +// Marshal serializes a Go value as a []byte according to the provided +// marshal and encode options. It does not terminate the output with a newline. +// See MarshalNext for details about the conversion of a Go value into JSON. +func (mo MarshalOptions) Marshal(eo EncodeOptions, in any) (out []byte, err error) { + enc := getBufferedEncoder(eo) + defer putBufferedEncoder(enc) + enc.options.omitTopLevelNewline = true + err = mo.MarshalNext(enc, in) + // TODO(https://go.dev/issue/45038): Use bytes.Clone. + return append([]byte(nil), enc.buf...), err +} + +// MarshalFull serializes a Go value into an io.Writer according to the provided +// marshal and encode options. It does not terminate the output with a newline. +// See MarshalNext for details about the conversion of a Go value into JSON. +func (mo MarshalOptions) MarshalFull(eo EncodeOptions, out io.Writer, in any) error { + enc := getStreamingEncoder(out, eo) + defer putStreamingEncoder(enc) + enc.options.omitTopLevelNewline = true + err := mo.MarshalNext(enc, in) + return err +} + +// MarshalNext encodes a Go value as the next JSON value according to +// the provided marshal options. +// +// Type-specific marshal functions and methods take precedence +// over the default representation of a value. +// Functions or methods that operate on *T are only called when encoding +// a value of type T (by taking its address) or a non-nil value of *T. +// MarshalNext ensures that a value is always addressable +// (by boxing it on the heap if necessary) so that +// these functions and methods can be consistently called. For performance, +// it is recommended that MarshalNext be passed a non-nil pointer to the value. +// +// The input value is encoded as JSON according the following rules: +// +// - If any type-specific functions in MarshalOptions.Marshalers match +// the value type, then those functions are called to encode the value. +// If all applicable functions return SkipFunc, +// then the value is encoded according to subsequent rules. +// +// - If the value type implements MarshalerV2, +// then the MarshalNextJSON method is called to encode the value. +// +// - If the value type implements MarshalerV1, +// then the MarshalJSON method is called to encode the value. +// +// - If the value type implements encoding.TextMarshaler, +// then the MarshalText method is called to encode the value and +// subsequently encode its result as a JSON string. +// +// - Otherwise, the value is encoded according to the value's type +// as described in detail below. +// +// Most Go types have a default JSON representation. +// Certain types support specialized formatting according to +// a format flag optionally specified in the Go struct tag +// for the struct field that contains the current value +// (see the “JSON Representation of Go structs” section for more details). +// +// The representation of each type is as follows: +// +// - A Go boolean is encoded as a JSON boolean (e.g., true or false). +// It does not support any custom format flags. +// +// - A Go string is encoded as a JSON string. +// It does not support any custom format flags. 
+// +// - A Go []byte or [N]byte is encoded as a JSON string containing +// the binary value encoded using RFC 4648. +// If the format is "base64" or unspecified, then this uses RFC 4648, section 4. +// If the format is "base64url", then this uses RFC 4648, section 5. +// If the format is "base32", then this uses RFC 4648, section 6. +// If the format is "base32hex", then this uses RFC 4648, section 7. +// If the format is "base16" or "hex", then this uses RFC 4648, section 8. +// If the format is "array", then the bytes value is encoded as a JSON array +// where each byte is recursively JSON-encoded as each JSON array element. +// +// - A Go integer is encoded as a JSON number without fractions or exponents. +// If MarshalOptions.StringifyNumbers is specified, then the JSON number is +// encoded within a JSON string. It does not support any custom format +// flags. +// +// - A Go float is encoded as a JSON number. +// If MarshalOptions.StringifyNumbers is specified, +// then the JSON number is encoded within a JSON string. +// If the format is "nonfinite", then NaN, +Inf, and -Inf are encoded as +// the JSON strings "NaN", "Infinity", and "-Infinity", respectively. +// Otherwise, the presence of non-finite numbers results in a SemanticError. +// +// - A Go map is encoded as a JSON object, where each Go map key and value +// is recursively encoded as a name and value pair in the JSON object. +// The Go map key must encode as a JSON string, otherwise this results +// in a SemanticError. When encoding keys, MarshalOptions.StringifyNumbers +// is automatically applied so that numeric keys encode as JSON strings. +// The Go map is traversed in a non-deterministic order. +// For deterministic encoding, consider using RawValue.Canonicalize. +// If the format is "emitnull", then a nil map is encoded as a JSON null. +// Otherwise by default, a nil map is encoded as an empty JSON object. +// +// - A Go struct is encoded as a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. +// +// - A Go slice is encoded as a JSON array, where each Go slice element +// is recursively JSON-encoded as the elements of the JSON array. +// If the format is "emitnull", then a nil slice is encoded as a JSON null. +// Otherwise by default, a nil slice is encoded as an empty JSON array. +// +// - A Go array is encoded as a JSON array, where each Go array element +// is recursively JSON-encoded as the elements of the JSON array. +// The JSON array length is always identical to the Go array length. +// It does not support any custom format flags. +// +// - A Go pointer is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// Format flags are forwarded to the encoding of the underlying value. +// +// - A Go interface is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// It does not support any custom format flags. +// +// - A Go time.Time is encoded as a JSON string containing the timestamp +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared +// in the time package (e.g., RFC1123), then that format is used. +// Otherwise, the format is used as-is with time.Time.Format if non-empty. +// +// - A Go time.Duration is encoded as a JSON string containing the duration +// formatted according to time.Duration.String. 
+// If the format is "nanos", it is encoded as a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// JSON cannot represent cyclic data structures and +// MarshalNext does not handle them. +// Passing cyclic structures will result in an error. +func (mo MarshalOptions) MarshalNext(out *Encoder, in any) error { + v := reflect.ValueOf(in) + if !v.IsValid() || (v.Kind() == reflect.Pointer && v.IsNil()) { + return out.WriteToken(Null) + } + // Shallow copy non-pointer values to obtain an addressable value. + // It is beneficial to performance to always pass pointers to avoid this. + if v.Kind() != reflect.Pointer { + v2 := reflect.New(v.Type()) + v2.Elem().Set(v) + v = v2 + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the marshal function for this type. + marshal := lookupArshaler(t).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t) + } + if err := marshal(mo, out, va); err != nil { + if !out.options.AllowDuplicateNames { + out.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// UnmarshalOptions configures how JSON data is deserialized as Go data. +// The zero value is equivalent to the default unmarshal settings. +type UnmarshalOptions struct { + requireKeyedLiterals + nonComparable + + // Unmarshalers is a list of type-specific unmarshalers to use. + Unmarshalers *Unmarshalers + + // StringifyNumbers specifies that numeric Go types can be deserialized + // from either a JSON number or a JSON string containing a JSON number + // without any surrounding whitespace. + StringifyNumbers bool + + // RejectUnknownMembers specifies that unknown members should be rejected + // when unmarshaling a JSON object, regardless of whether there is a field + // to store unknown members. + RejectUnknownMembers bool + + // formatDepth is the depth at which we respect the format flag. + formatDepth int + // format is custom formatting for the value at the specified depth. + format string +} + +// Unmarshal deserializes a Go value from a []byte with default options. +// It is a thin wrapper over UnmarshalOptions.Unmarshal. +func Unmarshal(in []byte, out any) error { + return UnmarshalOptions{}.Unmarshal(DecodeOptions{}, in, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader with default options. +// It is a thin wrapper over UnmarshalOptions.UnmarshalFull. +func UnmarshalFull(in io.Reader, out any) error { + return UnmarshalOptions{}.UnmarshalFull(DecodeOptions{}, in, out) +} + +// Unmarshal deserializes a Go value from a []byte according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. +// The input must be a single JSON value with optional whitespace interspersed. +// See UnmarshalNext for details about the conversion of JSON into a Go value. +func (uo UnmarshalOptions) Unmarshal(do DecodeOptions, in []byte, out any) error { + dec := getBufferedDecoder(in, do) + defer putBufferedDecoder(dec) + return uo.unmarshalFull(dec, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. +// The input must be a single JSON value with optional whitespace interspersed. +// It consumes the entirety of io.Reader until io.EOF is encountered. 
+// See UnmarshalNext for details about the conversion of JSON into a Go value. +func (uo UnmarshalOptions) UnmarshalFull(do DecodeOptions, in io.Reader, out any) error { + dec := getStreamingDecoder(in, do) + defer putStreamingDecoder(dec) + return uo.unmarshalFull(dec, out) +} +func (uo UnmarshalOptions) unmarshalFull(in *Decoder, out any) error { + switch err := uo.UnmarshalNext(in, out); err { + case nil: + return in.checkEOF() + case io.EOF: + return io.ErrUnexpectedEOF + default: + return err + } +} + +// UnmarshalNext decodes the next JSON value into a Go value according to +// the provided unmarshal options. The output must be a non-nil pointer. +// +// Type-specific unmarshal functions and methods take precedence +// over the default representation of a value. +// Functions or methods that operate on *T are only called when decoding +// a value of type T (by taking its address) or a non-nil value of *T. +// UnmarshalNext ensures that a value is always addressable +// (by boxing it on the heap if necessary) so that +// these functions and methods can be consistently called. +// +// The input is decoded into the output according the following rules: +// +// - If any type-specific functions in UnmarshalOptions.Unmarshalers match +// the value type, then those functions are called to decode the JSON +// value. If all applicable functions return SkipFunc, +// then the input is decoded according to subsequent rules. +// +// - If the value type implements UnmarshalerV2, +// then the UnmarshalNextJSON method is called to decode the JSON value. +// +// - If the value type implements UnmarshalerV1, +// then the UnmarshalJSON method is called to decode the JSON value. +// +// - If the value type implements encoding.TextUnmarshaler, +// then the input is decoded as a JSON string and +// the UnmarshalText method is called with the decoded string value. +// This fails with a SemanticError if the input is not a JSON string. +// +// - Otherwise, the JSON value is decoded according to the value's type +// as described in detail below. +// +// Most Go types have a default JSON representation. +// Certain types support specialized formatting according to +// a format flag optionally specified in the Go struct tag +// for the struct field that contains the current value +// (see the “JSON Representation of Go structs” section for more details). +// A JSON null may be decoded into every supported Go value where +// it is equivalent to storing the zero value of the Go value. +// If the input JSON kind is not handled by the current Go value type, +// then this fails with a SemanticError. Unless otherwise specified, +// the decoded value replaces any pre-existing value. +// +// The representation of each type is as follows: +// +// - A Go boolean is decoded from a JSON boolean (e.g., true or false). +// It does not support any custom format flags. +// +// - A Go string is decoded from a JSON string. +// It does not support any custom format flags. +// +// - A Go []byte or [N]byte is decoded from a JSON string +// containing the binary value encoded using RFC 4648. +// If the format is "base64" or unspecified, then this uses RFC 4648, section 4. +// If the format is "base64url", then this uses RFC 4648, section 5. +// If the format is "base32", then this uses RFC 4648, section 6. +// If the format is "base32hex", then this uses RFC 4648, section 7. +// If the format is "base16" or "hex", then this uses RFC 4648, section 8. 
+// If the format is "array", then the Go slice or array is decoded from a +// JSON array where each JSON element is recursively decoded for each byte. +// When decoding into a non-nil []byte, the slice length is reset to zero +// and the decoded input is appended to it. +// When decoding into a [N]byte, the input must decode to exactly N bytes, +// otherwise it fails with a SemanticError. +// +// - A Go integer is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// It fails with a SemanticError if the JSON number +// has a fractional or exponent component. +// It also fails if it overflows the representation of the Go integer type. +// It does not support any custom format flags. +// +// - A Go float is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// The JSON number is parsed as the closest representable Go float value. +// If the format is "nonfinite", then the JSON strings +// "NaN", "Infinity", and "-Infinity" are decoded as NaN, +Inf, and -Inf. +// Otherwise, the presence of such strings results in a SemanticError. +// +// - A Go map is decoded from a JSON object, +// where each JSON object name and value pair is recursively decoded +// as the Go map key and value. When decoding keys, +// UnmarshalOptions.StringifyNumbers is automatically applied so that +// numeric keys can decode from JSON strings. Maps are not cleared. +// If the Go map is nil, then a new map is allocated to decode into. +// If the decoded key matches an existing Go map entry, the entry value +// is reused by decoding the JSON object value into it. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go struct is decoded from a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. +// +// - A Go slice is decoded from a JSON array, where each JSON element +// is recursively decoded and appended to the Go slice. +// Before appending into a Go slice, a new slice is allocated if it is nil, +// otherwise the slice length is reset to zero. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go array is decoded from a JSON array, where each JSON array element +// is recursively decoded as each corresponding Go array element. +// Each Go array element is zeroed before decoding into it. +// It fails with a SemanticError if the JSON array does not contain +// the exact same number of elements as the Go array. +// It does not support any custom format flags. +// +// - A Go pointer is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil pointer. +// Otherwise, it allocates a new underlying value if the pointer is nil, +// and recursively JSON decodes into the underlying value. +// Format flags are forwarded to the decoding of the underlying type. +// +// - A Go interface is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil interface value. +// Otherwise, a nil interface value of an empty interface type is initialized +// with a zero Go bool, string, float64, map[string]any, or []any if the +// input is a JSON boolean, string, number, object, or array, respectively. 
+// If the interface value is still nil, then this fails with a SemanticError +// since decoding could not determine an appropriate Go type to decode into. +// For example, unmarshaling into a nil io.Reader fails since +// there is no concrete type to populate the interface value with. +// Otherwise an underlying value exists and it recursively decodes +// the JSON input into it. It does not support any custom format flags. +// +// - A Go time.Time is decoded from a JSON string containing the time +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared in +// the time package (e.g., RFC1123), then that format is used for parsing. +// Otherwise, the format is used as-is with time.Time.Parse if non-empty. +// +// - A Go time.Duration is decoded from a JSON string by +// passing the decoded string to time.ParseDuration. +// If the format is "nanos", it is instead decoded from a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// In general, unmarshaling follows merge semantics (similar to RFC 7396) +// where the decoded Go value replaces the destination value +// for any JSON kind other than an object. +// For JSON objects, the input object is merged into the destination value +// where matching object members recursively apply merge semantics. +func (uo UnmarshalOptions) UnmarshalNext(in *Decoder, out any) error { + v := reflect.ValueOf(out) + if !v.IsValid() || v.Kind() != reflect.Pointer || v.IsNil() { + var t reflect.Type + if v.IsValid() { + t = v.Type() + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + } + err := errors.New("value must be passed as a non-nil pointer reference") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the unmarshal function for this type. + unmarshal := lookupArshaler(t).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t) + } + if err := unmarshal(uo, in, va); err != nil { + if !in.options.AllowDuplicateNames { + in.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// addressableValue is a reflect.Value that is guaranteed to be addressable +// such that calling the Addr and Set methods do not panic. +// +// There is no compile magic that enforces this property, +// but rather the need to construct this type makes it easier to examine each +// construction site to ensure that this property is upheld. +type addressableValue struct{ reflect.Value } + +// newAddressableValue constructs a new addressable value of type t. +func newAddressableValue(t reflect.Type) addressableValue { + return addressableValue{reflect.New(t).Elem()} +} + +// All marshal and unmarshal behavior is implemented using these signatures. 
+type ( + marshaler = func(MarshalOptions, *Encoder, addressableValue) error + unmarshaler = func(UnmarshalOptions, *Decoder, addressableValue) error +) + +type arshaler struct { + marshal marshaler + unmarshal unmarshaler + nonDefault bool +} + +var lookupArshalerCache sync.Map // map[reflect.Type]*arshaler + +func lookupArshaler(t reflect.Type) *arshaler { + if v, ok := lookupArshalerCache.Load(t); ok { + return v.(*arshaler) + } + + fncs := makeDefaultArshaler(t) + fncs = makeMethodArshaler(fncs, t) + fncs = makeTimeArshaler(fncs, t) + + // Use the last stored so that duplicate arshalers can be garbage collected. + v, _ := lookupArshalerCache.LoadOrStore(t, fncs) + return v.(*arshaler) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go new file mode 100644 index 000000000..c62b1f320 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go @@ -0,0 +1,238 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import "reflect" + +// This files contains an optimized marshal and unmarshal implementation +// for the any type. This type is often used when the Go program has +// no knowledge of the JSON schema. This is a common enough occurrence +// to justify the complexity of adding logic for this. + +func marshalValueAny(mo MarshalOptions, enc *Encoder, val any) error { + switch val := val.(type) { + case nil: + return enc.WriteToken(Null) + case bool: + return enc.WriteToken(Bool(val)) + case string: + return enc.WriteToken(String(val)) + case float64: + return enc.WriteToken(Float(val)) + case map[string]any: + return marshalObjectAny(mo, enc, val) + case []any: + return marshalArrayAny(mo, enc, val) + default: + v := newAddressableValue(reflect.TypeOf(val)) + v.Set(reflect.ValueOf(val)) + marshal := lookupArshaler(v.Type()).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, v.Type()) + } + return marshal(mo, enc, v) + } +} + +func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) { + switch k := dec.PeekKind(); k { + case '{': + return unmarshalObjectAny(uo, dec) + case '[': + return unmarshalArrayAny(uo, dec) + default: + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return nil, err + } + switch val.Kind() { + case 'n': + return nil, nil + case 'f': + return false, nil + case 't': + return true, nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if dec.stringCache == nil { + dec.stringCache = new(stringCache) + } + return dec.stringCache.make(val), nil + case '0': + fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid + return fv, nil + default: + panic("BUG: invalid kind: " + k.String()) + } + } +} + +func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + v := reflect.ValueOf(obj) + if err := enc.seenPointers.visit(v); err != nil { + return err + } + defer enc.seenPointers.leave(v) + } + + // Optimize for marshaling an empty map without any preceding whitespace. 
+ if len(obj) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. + if !enc.options.AllowInvalidUTF8 { + enc.tokens.last.disableNamespace() + } + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } + } + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } + } + putStrings(names) + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil +} + +func unmarshalObjectAny(uo UnmarshalOptions, dec *Decoder) (map[string]any, error) { + tok, err := dec.ReadToken() + if err != nil { + return nil, err + } + k := tok.Kind() + switch k { + case 'n': + return nil, nil + case '{': + obj := make(map[string]any) + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. + if !dec.options.AllowInvalidUTF8 { + dec.tokens.last.disableNamespace() + } + for dec.PeekKind() != '}' { + tok, err := dec.ReadToken() + if err != nil { + return obj, err + } + name := tok.String() + + // Manually check for duplicate names. + if _, ok := obj[name]; ok { + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return obj, err.withOffset(dec.InputOffset() - int64(len(name))) + } + + val, err := unmarshalValueAny(uo, dec) + obj[name] = val + if err != nil { + return obj, err + } + } + if _, err := dec.ReadToken(); err != nil { + return obj, err + } + return obj, nil + } + return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: mapStringAnyType} +} + +func marshalArrayAny(mo MarshalOptions, enc *Encoder, arr []any) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + v := reflect.ValueOf(arr) + if err := enc.seenPointers.visit(v); err != nil { + return err + } + defer enc.seenPointers.leave(v) + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + if len(arr) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) 
+ enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + for _, val := range arr { + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil +} + +func unmarshalArrayAny(uo UnmarshalOptions, dec *Decoder) ([]any, error) { + tok, err := dec.ReadToken() + if err != nil { + return nil, err + } + k := tok.Kind() + switch k { + case 'n': + return nil, nil + case '[': + arr := []any{} + for dec.PeekKind() != ']' { + val, err := unmarshalValueAny(uo, dec) + arr = append(arr, val) + if err != nil { + return arr, err + } + } + if _, err := dec.ReadToken(); err != nil { + return arr, err + } + return arr, nil + } + return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: sliceAnyType} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go new file mode 100644 index 000000000..fd26eba35 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go @@ -0,0 +1,1485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "sync" +) + +// optimizeCommon specifies whether to use optimizations targeted for certain +// common patterns, rather than using the slower, but more general logic. +// All tests should pass regardless of whether this is true or not. +const optimizeCommon = true + +var ( + // Most natural Go type that correspond with each JSON type. + anyType = reflect.TypeOf((*any)(nil)).Elem() // JSON value + boolType = reflect.TypeOf((*bool)(nil)).Elem() // JSON bool + stringType = reflect.TypeOf((*string)(nil)).Elem() // JSON string + float64Type = reflect.TypeOf((*float64)(nil)).Elem() // JSON number + mapStringAnyType = reflect.TypeOf((*map[string]any)(nil)).Elem() // JSON object + sliceAnyType = reflect.TypeOf((*[]any)(nil)).Elem() // JSON array + + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + emptyStructType = reflect.TypeOf((*struct{})(nil)).Elem() +) + +const startDetectingCyclesAfter = 1000 + +type seenPointers map[typedPointer]struct{} + +type typedPointer struct { + typ reflect.Type + ptr any // always stores unsafe.Pointer, but avoids depending on unsafe +} + +// visit visits pointer p of type t, reporting an error if seen before. +// If successfully visited, then the caller must eventually call leave. 
+func (m *seenPointers) visit(v reflect.Value) error { + p := typedPointer{v.Type(), v.UnsafePointer()} + if _, ok := (*m)[p]; ok { + return &SemanticError{action: "marshal", GoType: p.typ, Err: errors.New("encountered a cycle")} + } + if *m == nil { + *m = make(map[typedPointer]struct{}) + } + (*m)[p] = struct{}{} + return nil +} +func (m *seenPointers) leave(v reflect.Value) { + p := typedPointer{v.Type(), v.UnsafePointer()} + delete(*m, p) +} + +func makeDefaultArshaler(t reflect.Type) *arshaler { + switch t.Kind() { + case reflect.Bool: + return makeBoolArshaler(t) + case reflect.String: + return makeStringArshaler(t) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return makeIntArshaler(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return makeUintArshaler(t) + case reflect.Float32, reflect.Float64: + return makeFloatArshaler(t) + case reflect.Map: + return makeMapArshaler(t) + case reflect.Struct: + return makeStructArshaler(t) + case reflect.Slice: + fncs := makeSliceArshaler(t) + if t.AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Array: + fncs := makeArrayArshaler(t) + if reflect.SliceOf(t.Elem()).AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Pointer: + return makePointerArshaler(t) + case reflect.Interface: + return makeInterfaceArshaler(t) + default: + return makeInvalidArshaler(t) + } +} + +func makeBoolArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace. + if optimizeCommon && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, 't') + if va.Bool() { + enc.buf = append(enc.buf, "true"...) + } else { + enc.buf = append(enc.buf, "false"...) 
+ } + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.WriteToken(Bool(va.Bool())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.SetBool(false) + return nil + case 't', 'f': + va.SetBool(tok.Bool()) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeStringArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + return enc.WriteToken(String(va.String())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetString("") + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if dec.stringCache == nil { + dec.stringCache = new(stringCache) + } + str := dec.stringCache.make(val) + va.SetString(str) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +var ( + encodeBase16 = func(dst, src []byte) { hex.Encode(dst, src) } + encodeBase32 = base32.StdEncoding.Encode + encodeBase32Hex = base32.HexEncoding.Encode + encodeBase64 = base64.StdEncoding.Encode + encodeBase64URL = base64.URLEncoding.Encode + encodedLenBase16 = hex.EncodedLen + encodedLenBase32 = base32.StdEncoding.EncodedLen + encodedLenBase32Hex = base32.HexEncoding.EncodedLen + encodedLenBase64 = base64.StdEncoding.EncodedLen + encodedLenBase64URL = base64.URLEncoding.EncodedLen + decodeBase16 = hex.Decode + decodeBase32 = base32.StdEncoding.Decode + decodeBase32Hex = base32.HexEncoding.Decode + decodeBase64 = base64.StdEncoding.Decode + decodeBase64URL = base64.URLEncoding.Decode + decodedLenBase16 = hex.DecodedLen + decodedLenBase32 = base32.StdEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase32Hex = base32.HexEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase64 = base64.StdEncoding.WithPadding(base64.NoPadding).DecodedLen + decodedLenBase64URL = base64.URLEncoding.WithPadding(base64.NoPadding).DecodedLen +) + +func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { + // NOTE: This handles both []byte and [N]byte. 
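Editorial aside (not part of the vendored file): the format flags handled below are typically selected through struct tags; the "format:..." tag option is assumed to be parsed by fields.go elsewhere in this patch.

type blobs struct {
	Std []byte `json:"std"`               // default representation: padded base64 (RFC 4648)
	Hex []byte `json:"hex,format:base16"` // hexadecimal string
	Arr []byte `json:"arr,format:array"`  // fall back to the ordinary slice representation
}

// Marshaling blobs{Std: []byte{1}, Hex: []byte{0xAB}, Arr: []byte{1, 2}}
// would be expected to yield {"std":"AQ==","hex":"ab","arr":[1,2]}.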
+ marshalDefault := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + encode, encodedLen := encodeBase64, encodedLenBase64 + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + switch mo.format { + case "base64": + encode, encodedLen = encodeBase64, encodedLenBase64 + case "base64url": + encode, encodedLen = encodeBase64URL, encodedLenBase64URL + case "base32": + encode, encodedLen = encodeBase32, encodedLenBase32 + case "base32hex": + encode, encodedLen = encodeBase32Hex, encodedLenBase32Hex + case "base16", "hex": + encode, encodedLen = encodeBase16, encodedLenBase16 + case "array": + mo.format = "" + return marshalDefault(mo, enc, va) + default: + return newInvalidFormatError("marshal", t, mo.format) + } + } + val := enc.UnusedBuffer() + b := va.Bytes() + n := len(`"`) + encodedLen(len(b)) + len(`"`) + if cap(val) < n { + val = make([]byte, n) + } else { + val = val[:n] + } + val[0] = '"' + encode(val[len(`"`):len(val)-len(`"`)], b) + val[len(val)-1] = '"' + return enc.WriteValue(val) + } + unmarshalDefault := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + switch uo.format { + case "base64": + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 + case "base64url": + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL + case "base32": + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 + case "base32hex": + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex + case "base16", "hex": + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 + case "array": + uo.format = "" + return unmarshalDefault(uo, dec, va) + default: + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + + // For base64 and base32, decodedLen computes the maximum output size + // when given the original input size. To compute the exact size, + // adjust the input size by excluding trailing padding characters. + // This is unnecessary for base16, but also harmless. + n := len(val) + for n > 0 && val[n-1] == '=' { + n-- + } + n = decodedLen(n) + b := va.Bytes() + if va.Kind() == reflect.Array { + if n != len(b) { + err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + } else { + if b == nil || cap(b) < n { + b = make([]byte, n) + } else { + b = b[:n] + } + } + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if va.Kind() == reflect.Slice { + va.SetBytes(b) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return fncs +} + +func makeIntArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. + if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendInt(enc.buf, va.Int(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(uint64(va.Int())) + return enc.writeNumber(x, rawIntNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetInt(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + var negOffset int + neg := val[0] == '-' + if neg { + negOffset = 1 + } + n, ok := parseDecUint(val[negOffset:]) + maxInt := uint64(1) << (bits - 1) + overflow := (neg && n > maxInt) || (!neg && n > maxInt-1) + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if neg { + va.SetInt(int64(-n)) + } else { + va.SetInt(int64(+n)) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeUintArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
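A small editorial sketch (not part of the vendored file) of the integer unmarshal paths above; it assumes the UnmarshalOptions.Unmarshal helper from the top of arshal.go, which is outside this hunk.

func exampleIntUnmarshal() {
	var n int8
	uo := UnmarshalOptions{StringifyNumbers: true}
	_ = uo.Unmarshal([]byte(`"42"`), &n) // JSON string accepted because StringifyNumbers is set
	if err := uo.Unmarshal([]byte(`1000`), &n); err != nil {
		_ = err // expected: a SemanticError carrying strconv.ErrRange via the overflow branch in makeIntArshaler
	}
}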
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendUint(enc.buf, va.Uint(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(va.Uint()) + return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetUint(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + n, ok := parseDecUint(val) + maxUint := uint64(1) << bits + overflow := n > maxUint-1 + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + va.SetUint(n) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeFloatArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + var allowNonFinite bool + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + fv := va.Float() + if math.IsNaN(fv) || math.IsInf(fv, 0) { + if !allowNonFinite { + err := fmt.Errorf("invalid value: %v", fv) + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteToken(Float(fv)) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
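Editorial aside (not part of the vendored file): the nonfinite handling above is normally reached through a struct tag; the "format:nonfinite" tag option is assumed to be parsed by fields.go elsewhere in this patch.

type reading struct {
	Plain float64 `json:"plain"`
	Loose float64 `json:"loose,format:nonfinite"`
}

// Marshaling reading{Plain: 1.5, Loose: math.Inf(-1)} would be expected to
// produce {"plain":1.5,"loose":"-Infinity"}, while a NaN or infinity in
// Plain fails with a SemanticError ("invalid value: ...").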
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = appendNumber(enc.buf, fv, bits) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.writeNumber(fv, bits, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var allowNonFinite bool + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetFloat(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if allowNonFinite { + switch string(val) { + case "NaN": + va.SetFloat(math.NaN()) + return nil + case "Infinity": + va.SetFloat(math.Inf(+1)) + return nil + case "-Infinity": + va.SetFloat(math.Inf(-1)) + return nil + } + } + if !uo.StringifyNumbers { + break + } + if n, err := consumeNumber(val); n != len(val) || err != nil { + err := fmt.Errorf("cannot parse %q as JSON number: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + fallthrough + case '0': + // NOTE: Floating-point parsing is by nature a lossy operation. + // We never report an overflow condition since we can always + // round the input to the closest representable finite value. + // For extremely large numbers, the closest value is ±MaxFloat. + fv, _ := parseFloat(val, bits) + va.SetFloat(fv) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeMapArshaler(t reflect.Type) *arshaler { + // NOTE: The logic below disables namespaces for tracking duplicate names + // when handling map keys with a unique representation. + + // NOTE: Values retrieved from a map are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the map afterwards. + + var fncs arshaler + var ( + once sync.Once + keyFncs *arshaler + valFncs *arshaler + ) + init := func() { + keyFncs = lookupArshaler(t.Key()) + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty map without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + if n > 0 { + // Handle maps with numeric key types by stringifying them. 
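Editorial sketch (not part of the vendored file) of the key stringification just noted; it assumes the MarshalOptions.Marshal helper from the top of arshal.go. On the unmarshal side further below, keys that decode to the same Go value (for example "0" and "-0") are rejected as duplicate object names.

func exampleMapKeys() {
	ports := map[int]string{80: "http", 443: "https"}
	mo := MarshalOptions{Deterministic: true}
	b, _ := mo.Marshal(ports)
	// Keys are marshaled with StringifyNumbers forced on, so the int keys
	// become object member names, e.g. {"443":"https","80":"http"}
	// (sorted here because the Deterministic path below is taken).
	_ = b
}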
+ mko := mo + mko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + marshalKey := keyFncs.marshal + marshalVal := valFncs.marshal + if mo.Marshalers != nil { + var ok bool + marshalKey, ok = mo.Marshalers.lookup(marshalKey, t.Key()) + marshalVal, _ = mo.Marshalers.lookup(marshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // A Go map guarantees that each entry has a unique key. + // As such, disable the expensive duplicate name check if we know + // that every Go key will serialize as a unique JSON string. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), enc.options.AllowInvalidUTF8) { + enc.tokens.last.disableNamespace() + } + + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if va.IsNil() { + va.Set(reflect.MakeMap(t)) + } + + // Handle maps with numeric key types by stringifying them. + uko := uo + uko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + unmarshalKey := keyFncs.unmarshal + unmarshalVal := valFncs.unmarshal + if uo.Unmarshalers != nil { + var ok bool + unmarshalKey, ok = uo.Unmarshalers.lookup(unmarshalKey, t.Key()) + unmarshalVal, _ = uo.Unmarshalers.lookup(unmarshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // Manually check for duplicate entries by virtue of whether the + // unmarshaled key already exists in the destination Go map. + // Consequently, syntactically different names (e.g., "0" and "-0") + // will be rejected as duplicates since they semantically refer + // to the same Go value. This is an unusual interaction + // between syntax and semantics, but is more correct. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), dec.options.AllowInvalidUTF8) { + dec.tokens.last.disableNamespace() + } + + // In the rare case where the map is not already empty, + // then we need to manually track which keys we already saw + // since existing presence alone is insufficient to indicate + // whether the input had a duplicate name. + var seen reflect.Value + if !dec.options.AllowDuplicateNames && va.Len() > 0 { + seen = reflect.MakeMap(reflect.MapOf(k.Type(), emptyStructType)) + } + + for dec.PeekKind() != '}' { + k.Set(reflect.Zero(t.Key())) + if err := unmarshalKey(uko, dec, k); err != nil { + return err + } + if k.Kind() == reflect.Interface && !k.IsNil() && !k.Elem().Type().Comparable() { + err := fmt.Errorf("invalid incomparable key type %v", k.Elem().Type()) + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + + if v2 := va.MapIndex(k.Value); v2.IsValid() { + if !dec.options.AllowDuplicateNames && (!seen.IsValid() || seen.MapIndex(k.Value).IsValid()) { + // TODO: Unread the object name. + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(name))) + } + v.Set(v2) + } else { + v.Set(reflect.Zero(v.Type())) + } + err := unmarshalVal(uo, dec, v) + va.SetMapIndex(k.Value, v.Value) + if seen.IsValid() { + seen.SetMapIndex(k.Value, reflect.Zero(emptyStructType)) + } + if err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +// mapKeyWithUniqueRepresentation reports whether all possible values of k +// marshal to a different JSON value, and whether all possible JSON values +// that can unmarshal into k unmarshal to different Go values. 
+// In other words, the representation must be a bijective. +func mapKeyWithUniqueRepresentation(k reflect.Kind, allowInvalidUTF8 bool) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.String: + // For strings, we have to be careful since names with invalid UTF-8 + // maybe unescape to the same Go string value. + return !allowInvalidUTF8 + default: + // Floating-point kinds are not listed above since NaNs + // can appear multiple times and all serialize as "NaN". + return false + } +} + +func makeStructArshaler(t reflect.Type) *arshaler { + // NOTE: The logic below disables namespaces for tracking duplicate names + // and does the tracking locally with an efficient bit-set based on which + // Go struct fields were seen. + + var fncs arshaler + var ( + once sync.Once + fields structFields + errInit *SemanticError + ) + init := func() { + fields, errInit = makeStructFields(t) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + once.Do(init) + if errInit != nil { + err := *errInit // shallow copy SemanticError + err.action = "marshal" + return &err + } + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + var seenIdxs uintSet + prevIdx := -1 + enc.tokens.last.disableNamespace() // we manually ensure unique names below + for i := range fields.flattened { + f := &fields.flattened[i] + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], false) + if !v.IsValid() { + continue // implies a nil inlined field + } + } + + // OmitZero skips the field if the Go value is zero, + // which we can determine up front without calling the marshaler. + if f.omitzero && ((f.isZero == nil && v.IsZero()) || (f.isZero != nil && f.isZero(v))) { + continue + } + + marshal := f.fncs.marshal + nonDefault := f.fncs.nonDefault + if mo.Marshalers != nil { + var ok bool + marshal, ok = mo.Marshalers.lookup(marshal, f.typ) + nonDefault = nonDefault || ok + } + + // OmitEmpty skips the field if the marshaled JSON value is empty, + // which we can know up front if there are no custom marshalers, + // otherwise we must marshal the value and unwrite it if empty. + if f.omitempty && !nonDefault && f.isEmpty != nil && f.isEmpty(v) { + continue // fast path for omitempty + } + + // Write the object member name. + // + // The logic below is semantically equivalent to: + // enc.WriteToken(String(f.name)) + // but specialized and simplified because: + // 1. The Encoder must be expecting an object name. + // 2. The object namespace is guaranteed to be disabled. + // 3. The object name is guaranteed to be valid and pre-escaped. + // 4. There is no need to flush the buffer (for unwrite purposes). + // 5. There is no possibility of an error occurring. + if optimizeCommon { + // Append any delimiters or optional whitespace. + if enc.tokens.last.length() > 0 { + enc.buf = append(enc.buf, ',') + } + if enc.options.multiline { + enc.buf = enc.appendIndent(enc.buf, enc.tokens.needIndent('"')) + } + + // Append the token to the output and to the state machine. + n0 := len(enc.buf) // offset before calling appendString + if enc.options.EscapeRune == nil { + enc.buf = append(enc.buf, f.quotedName...) 
+ } else { + enc.buf, _ = appendString(enc.buf, f.name, false, enc.options.EscapeRune) + } + if !enc.options.AllowDuplicateNames { + enc.names.replaceLastQuotedOffset(n0) + } + enc.tokens.last.increment() + } else { + if err := enc.WriteToken(String(f.name)); err != nil { + return err + } + } + + // Write the object member value. + mo2 := mo + if f.string { + mo2.StringifyNumbers = true + } + if f.format != "" { + mo2.formatDepth = enc.tokens.depth() + mo2.format = f.format + } + if err := marshal(mo2, enc, v); err != nil { + return err + } + + // Try unwriting the member if empty (slow path for omitempty). + if f.omitempty { + var prevName *string + if prevIdx >= 0 { + prevName = &fields.flattened[prevIdx].name + } + if enc.unwriteEmptyObjectMember(prevName) { + continue + } + } + + // Remember the previous written object member. + // The set of seen fields only needs to be updated to detect + // duplicate names with those from the inlined fallback. + if !enc.options.AllowDuplicateNames && fields.inlinedFallback != nil { + seenIdxs.insert(uint(f.id)) + } + prevIdx = f.id + } + if fields.inlinedFallback != nil && !(mo.DiscardUnknownMembers && fields.inlinedFallback.unknown) { + var insertUnquotedName func([]byte) bool + if !enc.options.AllowDuplicateNames { + insertUnquotedName = func(name []byte) bool { + // Check that the name from inlined fallback does not match + // one of the previously marshaled names from known fields. + if foldedFields := fields.byFoldedName[string(foldName(name))]; len(foldedFields) > 0 { + if f := fields.byActualName[string(name)]; f != nil { + return seenIdxs.insert(uint(f.id)) + } + for _, f := range foldedFields { + if f.nocase { + return seenIdxs.insert(uint(f.id)) + } + } + } + + // Check that the name does not match any other name + // previously marshaled from the inlined fallback. + return enc.namespaces.last().insertUnquoted(name) + } + } + if err := marshalInlinedFallbackAll(mo, enc, va, fields.inlinedFallback, insertUnquotedName); err != nil { + return err + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if errInit != nil { + err := *errInit // shallow copy SemanticError + err.action = "unmarshal" + return &err + } + var seenIdxs uintSet + dec.tokens.last.disableNamespace() + for dec.PeekKind() != '}' { + // Process the object member name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + name := unescapeStringMayCopy(val, flags.isVerbatim()) + f := fields.byActualName[string(name)] + if f == nil { + for _, f2 := range fields.byFoldedName[string(foldName(name))] { + if f2.nocase { + f = f2 + break + } + } + if f == nil { + if uo.RejectUnknownMembers && (fields.inlinedFallback == nil || fields.inlinedFallback.unknown) { + return &SemanticError{action: "unmarshal", GoType: t, Err: fmt.Errorf("unknown name %s", val)} + } + if !dec.options.AllowDuplicateNames && !dec.namespaces.last().insertUnquoted(name) { + // TODO: Unread the object name. 
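Editorial aside (not part of the vendored file): the field options referenced above (omitzero, omitempty, string, format, and the inlined fallback) map onto struct tags whose exact syntax is assumed to be parsed by fields.go elsewhere in this patch; the standard time package is assumed for the example field.

type server struct {
	Name  string         `json:"name"`
	Port  int            `json:"port,string"`    // numbers carried as JSON strings
	Note  string         `json:"note,omitempty"` // dropped when the JSON value is empty
	Since time.Time      `json:"since,omitzero"` // dropped when the Go value is zero
	Extra map[string]any `json:",inline"`        // fallback for arbitrary object members
}

// With UnmarshalOptions{RejectUnknownMembers: true} and no inlined fallback,
// an unknown object member is rejected with a SemanticError ("unknown name ...")
// rather than skipped or stored.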
+ err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + if fields.inlinedFallback == nil { + // Skip unknown value since we have no place to store it. + if err := dec.SkipValue(); err != nil { + return err + } + } else { + // Marshal into value capable of storing arbitrary object members. + if err := unmarshalInlinedFallbackNext(uo, dec, va, fields.inlinedFallback, val, name); err != nil { + return err + } + } + continue + } + } + if !dec.options.AllowDuplicateNames && !seenIdxs.insert(uint(f.id)) { + // TODO: Unread the object name. + err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + // Process the object member value. + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, f.typ) + } + uo2 := uo + if f.string { + uo2.StringifyNumbers = true + } + if f.format != "" { + uo2.formatDepth = dec.tokens.depth() + uo2.format = f.format + } + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + if err := unmarshal(uo2, dec, v); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func (va addressableValue) fieldByIndex(index []int, mayAlloc bool) addressableValue { + for _, i := range index { + va = va.indirect(mayAlloc) + if !va.IsValid() { + return va + } + va = addressableValue{va.Field(i)} // addressable if struct value is addressable + } + return va +} + +func (va addressableValue) indirect(mayAlloc bool) addressableValue { + if va.Kind() == reflect.Pointer { + if va.IsNil() { + if !mayAlloc { + return addressableValue{} + } + va.Set(reflect.New(va.Type().Elem())) + } + va = addressableValue{va.Elem()} // dereferenced pointer is always addressable + } + return va +} + +func makeSliceArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) 
+ enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + emptySlice := reflect.MakeSlice(t, 0, 0) + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + mustZero := true // we do not know the cleanliness of unused capacity + cap := va.Cap() + if cap > 0 { + va.SetLen(cap) + } + var i int + for dec.PeekKind() != ']' { + if i == cap { + // TODO(https://go.dev/issue/48000): Use reflect.Value.Append. + va.Set(reflect.Append(va.Value, reflect.Zero(t.Elem()))) + cap = va.Cap() + va.SetLen(cap) + mustZero = false // append guarantees that unused capacity is zero-initialized + } + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + i++ + if mustZero { + v.Set(reflect.Zero(t.Elem())) + } + if err := unmarshal(uo, dec, v); err != nil { + va.SetLen(i) + return err + } + } + if i == 0 { + va.Set(emptySlice) + } else { + va.SetLen(i) + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeArrayArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + n := t.Len() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + var i int + for dec.PeekKind() != ']' { + 
if i >= n { + err := errors.New("too many array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + v.Set(reflect.Zero(v.Type())) + if err := unmarshal(uo, dec, v); err != nil { + return err + } + i++ + } + if _, err := dec.ReadToken(); err != nil { + return err + } + if i < n { + err := errors.New("too few array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makePointerArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + // NOTE: MarshalOptions.format is forwarded to underlying marshal. + if va.IsNil() { + return enc.WriteToken(Null) + } + once.Do(init) + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // NOTE: UnmarshalOptions.format is forwarded to underlying unmarshal. + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + if va.IsNil() { + va.Set(reflect.New(t.Elem())) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return unmarshal(uo, dec, v) + } + return &fncs +} + +func makeInterfaceArshaler(t reflect.Type) *arshaler { + // NOTE: Values retrieved from an interface are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the interface afterwards. + + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + if va.IsNil() { + return enc.WriteToken(Null) + } + v := newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + marshal := lookupArshaler(v.Type()).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, v.Type()) + } + // Optimize for the any type if there are no special options. 
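Editorial sketch (not part of the vendored file) of the fixed-length array checks above; it assumes the UnmarshalOptions.Unmarshal helper from the top of arshal.go.

func exampleFixedArray() {
	var pair [2]int
	var uo UnmarshalOptions
	// A third element trips the "too many array elements" error above;
	// a single element would trip "too few array elements" instead.
	if err := uo.Unmarshal([]byte(`[1,2,3]`), &pair); err != nil {
		_ = err // expected: a SemanticError for [2]int
	}
}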
+ if optimizeCommon && t == anyType && !mo.StringifyNumbers && mo.format == "" && (mo.Marshalers == nil || !mo.Marshalers.fromAny) { + return marshalValueAny(mo, enc, va.Elem().Interface()) + } + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + var v addressableValue + if va.IsNil() { + // Optimize for the any type if there are no special options. + // We do not care about stringified numbers since JSON strings + // are always unmarshaled into an any value as Go strings. + // Duplicate name check must be enforced since unmarshalValueAny + // does not implement merge semantics. + if optimizeCommon && t == anyType && uo.format == "" && (uo.Unmarshalers == nil || !uo.Unmarshalers.fromAny) && !dec.options.AllowDuplicateNames { + v, err := unmarshalValueAny(uo, dec) + // We must check for nil interface values up front. + // See https://go.dev/issue/52310. + if v != nil { + va.Set(reflect.ValueOf(v)) + } + return err + } + + k := dec.PeekKind() + if !isAnyType(t) { + err := errors.New("cannot derive concrete type for non-empty interface") + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + switch k { + case 'f', 't': + v = newAddressableValue(boolType) + case '"': + v = newAddressableValue(stringType) + case '0': + v = newAddressableValue(float64Type) + case '{': + v = newAddressableValue(mapStringAnyType) + case '[': + v = newAddressableValue(sliceAnyType) + default: + // If k is invalid (e.g., due to an I/O or syntax error), then + // that will be cached by PeekKind and returned by ReadValue. + // If k is '}' or ']', then ReadValue must error since + // those are invalid kinds at the start of a JSON value. + _, err := dec.ReadValue() + return err + } + } else { + // Shallow copy the existing value to keep it addressable. + // Any mutations at the top-level of the value will be observable + // since we always store this value back into the interface value. + v = newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + } + unmarshal := lookupArshaler(v.Type()).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, v.Type()) + } + err := unmarshal(uo, dec, v) + va.Set(v.Value) + return err + } + return &fncs +} + +// isAnyType reports wether t is equivalent to the any interface type. +func isAnyType(t reflect.Type) bool { + // This is forward compatible if the Go language permits type sets within + // ordinary interfaces where an interface with zero methods does not + // necessarily mean it can hold every possible Go type. + // See https://go.dev/issue/45346. 
+ return t == anyType || anyType.Implements(t) +} + +func makeInvalidArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + return &SemanticError{action: "marshal", GoType: t} + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + return &SemanticError{action: "unmarshal", GoType: t} + } + return &fncs +} + +func newInvalidFormatError(action string, t reflect.Type, format string) error { + err := fmt.Errorf("invalid format flag: %q", format) + return &SemanticError{action: action, GoType: t, Err: err} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go new file mode 100644 index 000000000..8a4e70083 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go @@ -0,0 +1,387 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// SkipFunc may be returned by MarshalFuncV2 and UnmarshalFuncV2 functions. +// +// Any function that returns SkipFunc must not cause observable side effects +// on the provided Encoder or Decoder. For example, it is permissible to call +// Decoder.PeekKind, but not permissible to call Decoder.ReadToken or +// Encoder.WriteToken since such methods mutate the state. +const SkipFunc = jsonError("skip function") + +// Marshalers is a list of functions that may override the marshal behavior +// of specific types. Populate MarshalOptions.Marshalers to use it. +// A nil *Marshalers is equivalent to an empty list. +type Marshalers = typedMarshalers + +// NewMarshalers constructs a flattened list of marshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default marshaling behavior is used. +// +// For example: +// +// m1 := NewMarshalers(f1, f2) +// m2 := NewMarshalers(f0, m1, f3) // equivalent to m3 +// m3 := NewMarshalers(f0, f1, f2, f3) // equivalent to m2 +func NewMarshalers(ms ...*Marshalers) *Marshalers { + return newMarshalers(ms...) +} + +// Unmarshalers is a list of functions that may override the unmarshal behavior +// of specific types. Populate UnmarshalOptions.Unmarshalers to use it. +// A nil *Unmarshalers is equivalent to an empty list. +type Unmarshalers = typedUnmarshalers + +// NewUnmarshalers constructs a flattened list of unmarshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default unmarshaling behavior is used. +// +// For example: +// +// u1 := NewUnmarshalers(f1, f2) +// u2 := NewUnmarshalers(f0, u1, f3) // equivalent to u3 +// u3 := NewUnmarshalers(f0, f1, f2, f3) // equivalent to u2 +func NewUnmarshalers(us ...*Unmarshalers) *Unmarshalers { + return newUnmarshalers(us...) 
+} + +type typedMarshalers = typedArshalers[MarshalOptions, Encoder] +type typedUnmarshalers = typedArshalers[UnmarshalOptions, Decoder] +type typedArshalers[Options, Coder any] struct { + nonComparable + + fncVals []typedArshaler[Options, Coder] + fncCache sync.Map // map[reflect.Type]arshaler + + // fromAny reports whether any of Go types used to represent arbitrary JSON + // (i.e., any, bool, string, float64, map[string]any, or []any) matches + // any of the provided type-specific arshalers. + // + // This bit of information is needed in arshal_default.go to determine + // whether to use the specialized logic in arshal_any.go to handle + // the any interface type. The logic in arshal_any.go does not support + // type-specific arshal functions, so we must avoid using that logic + // if this is true. + fromAny bool +} +type typedMarshaler = typedArshaler[MarshalOptions, Encoder] +type typedUnmarshaler = typedArshaler[UnmarshalOptions, Decoder] +type typedArshaler[Options, Coder any] struct { + typ reflect.Type + fnc func(Options, *Coder, addressableValue) error + maySkip bool +} + +func newMarshalers(ms ...*Marshalers) *Marshalers { return newTypedArshalers(ms...) } +func newUnmarshalers(us ...*Unmarshalers) *Unmarshalers { return newTypedArshalers(us...) } +func newTypedArshalers[Options, Coder any](as ...*typedArshalers[Options, Coder]) *typedArshalers[Options, Coder] { + var a typedArshalers[Options, Coder] + for _, a2 := range as { + if a2 != nil { + a.fncVals = append(a.fncVals, a2.fncVals...) + a.fromAny = a.fromAny || a2.fromAny + } + } + if len(a.fncVals) == 0 { + return nil + } + return &a +} + +func (a *typedArshalers[Options, Coder]) lookup(fnc func(Options, *Coder, addressableValue) error, t reflect.Type) (func(Options, *Coder, addressableValue) error, bool) { + if a == nil { + return fnc, false + } + if v, ok := a.fncCache.Load(t); ok { + if v == nil { + return fnc, false + } + return v.(func(Options, *Coder, addressableValue) error), true + } + + // Collect a list of arshalers that can be called for this type. + // This list may be longer than 1 since some arshalers can be skipped. + var fncs []func(Options, *Coder, addressableValue) error + for _, fncVal := range a.fncVals { + if !castableTo(t, fncVal.typ) { + continue + } + fncs = append(fncs, fncVal.fnc) + if !fncVal.maySkip { + break // subsequent arshalers will never be called + } + } + + if len(fncs) == 0 { + a.fncCache.Store(t, nil) // nil to indicate that no funcs found + return fnc, false + } + + // Construct an arshaler that may call every applicable arshaler. + fncDefault := fnc + fnc = func(o Options, c *Coder, v addressableValue) error { + for _, fnc := range fncs { + if err := fnc(o, c, v); err != SkipFunc { + return err // may be nil or non-nil + } + } + return fncDefault(o, c, v) + } + + // Use the first stored so duplicate work can be garbage collected. + v, _ := a.fncCache.LoadOrStore(t, fnc) + return v.(func(Options, *Coder, addressableValue) error), true +} + +// MarshalFuncV1 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value. +// The value of T must not be retained outside the function call. +// It may not return SkipFunc. 
+func MarshalFuncV1[T any](fn func(T) ([]byte, error)) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + val, err := fn(va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "marshal function of type func(T) ([]byte, error)") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// MarshalFuncV2 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value by calling write methods +// on the provided encoder. It may return SkipFunc such that marshaling can +// move on to the next marshal function. However, no mutable method calls may +// be called on the encoder if SkipFunc is returned. +// The pointer to Encoder and the value of T must not be retained +// outside the function call. +func MarshalFuncV2[T any](fn func(MarshalOptions, *Encoder, T) error) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := fn(mo, enc, va.castTo(t).Interface().(T)) + currDepth, currLength := enc.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not write any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV1 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value. +// The input []byte must not be mutated. +// The input []byte and value T must not be retained outside the function call. +// It may not return SkipFunc. +func UnmarshalFuncV1[T any](fn func([]byte, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + err = fn(val, va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "unmarshal function of type func([]byte, T) error") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV2 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value by calling read methods +// on the provided decoder. It may return SkipFunc such that unmarshaling can +// move on to the next unmarshal function. However, no mutable method calls may +// be called on the decoder if SkipFunc is returned. +// The pointer to Decoder and the value of T must not be retained +// outside the function call. +func UnmarshalFuncV2[T any](fn func(UnmarshalOptions, *Decoder, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := fn(uo, dec, va.castTo(t).Interface().(T)) + currDepth, currLength := dec.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not read any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// assertCastableTo asserts that "to" is a valid type to be casted to. +// These are the Go types that type-specific arshalers may operate upon. +// +// Let AllTypes be the universal set of all possible Go types. +// This function generally asserts that: +// +// len([from for from in AllTypes if castableTo(from, to)]) > 0 +// +// otherwise it panics. +// +// As a special-case if marshal is false, then we forbid any non-pointer or +// non-interface type since it is almost always a bug trying to unmarshal +// into something where the end-user caller did not pass in an addressable value +// since they will not observe the mutations. +func assertCastableTo(to reflect.Type, marshal bool) { + switch to.Kind() { + case reflect.Interface: + return + case reflect.Pointer: + // Only allow unnamed pointers to be consistent with the fact that + // taking the address of a value produces an unnamed pointer type. + if to.Name() == "" { + return + } + default: + // Technically, non-pointer types are permissible for unmarshal. + // However, they are often a bug since the receiver would be immutable. + // Thus, only allow them for marshaling. + if marshal { + return + } + } + if marshal { + panic(fmt.Sprintf("input type %v must be an interface type, an unnamed pointer type, or a non-pointer type", to)) + } else { + panic(fmt.Sprintf("input type %v must be an interface type or an unnamed pointer type", to)) + } +} + +// castableTo checks whether values of type "from" can be casted to type "to". +// Nil pointer or interface "from" values are never considered castable. +// +// This function must be kept in sync with addressableValue.castTo. 
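+//
+// For illustration (not an exhaustive specification): a concrete type T is
+// castable to itself, to the unnamed pointer type *T, and to any interface
+// implemented by *T; the interface rule is what lets an arshaler registered
+// for an interface type serve addressable values whose pointer implements it.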
+func castableTo(from, to reflect.Type) bool { + switch to.Kind() { + case reflect.Interface: + // TODO: This breaks when ordinary interfaces can have type sets + // since interfaces now exist where only the value form of a type (T) + // implements the interface, but not the pointer variant (*T). + // See https://go.dev/issue/45346. + return reflect.PointerTo(from).Implements(to) + case reflect.Pointer: + // Common case for unmarshaling. + // From must be a concrete or interface type. + return reflect.PointerTo(from) == to + default: + // Common case for marshaling. + // From must be a concrete type. + return from == to + } +} + +// castTo casts va to the specified type. +// If the type is an interface, then the underlying type will always +// be a non-nil pointer to a concrete type. +// +// Requirement: castableTo(va.Type(), to) must hold. +func (va addressableValue) castTo(to reflect.Type) reflect.Value { + switch to.Kind() { + case reflect.Interface: + return va.Addr().Convert(to) + case reflect.Pointer: + return va.Addr() + default: + return va.Value + } +} + +// castableToFromAny reports whether "to" can be casted to from any +// of the dynamic types used to represent arbitrary JSON. +func castableToFromAny(to reflect.Type) bool { + for _, from := range []reflect.Type{anyType, boolType, stringType, float64Type, mapStringAnyType, sliceAnyType} { + if castableTo(from, to) { + return true + } + } + return false +} + +func wrapSkipFunc(err error, what string) error { + if err == SkipFunc { + return errors.New(what + " cannot be skipped") + } + return err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go new file mode 100644 index 000000000..258a98247 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -0,0 +1,213 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "reflect" +) + +// This package supports "inlining" a Go struct field, where the contents +// of the serialized field (which must be a JSON object) are treated as if +// they are part of the parent Go struct (which represents a JSON object). +// +// Generally, inlined fields are of a Go struct type, where the fields of the +// nested struct are virtually hoisted up to the parent struct using rules +// similar to how Go embedding works (but operating within the JSON namespace). +// +// However, inlined fields may also be of a Go map type with a string key +// or a RawValue. Such inlined fields are called "fallback" fields since they +// represent any arbitrary JSON object member. Explicitly named fields take +// precedence over the inlined fallback. Only one inlined fallback is allowed. + +var rawValueType = reflect.TypeOf((*RawValue)(nil)).Elem() + +// marshalInlinedFallbackAll marshals all the members in an inlined fallback. 
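+// The fallback is either a RawValue, whose members are replayed one by one
+// through the encoder, or a map[string]V, whose entries are marshaled as
+// members of the parent object; insertUnquotedName, when non-nil, is used
+// to reject duplicate member names.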
+func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableValue, f *structField, insertUnquotedName func([]byte) bool) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], false) + if !v.IsValid() { + return nil // implies a nil inlined field + } + } + v = v.indirect(false) + if !v.IsValid() { + return nil + } + + if v.Type() == rawValueType { + b := v.Interface().(RawValue) + if len(b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + return nil + } + + dec := getBufferedDecoder(b, DecodeOptions{AllowDuplicateNames: true, AllowInvalidUTF8: true}) + defer putBufferedDecoder(dec) + + tok, err := dec.ReadToken() + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if tok.Kind() != '{' { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "marshal", JSONKind: tok.Kind(), GoType: rawValueType, Err: err} + } + for dec.PeekKind() != '}' { + // Parse the JSON object name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if insertUnquotedName != nil { + name := unescapeStringMayCopy(val, flags.isVerbatim()) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(val) + " in object"} + } + } + if err := enc.WriteValue(val); err != nil { + return err + } + + // Parse the JSON object value. + val, err = dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := enc.WriteValue(val); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := dec.checkEOF(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + return nil + } else { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { + return nil + } + mk := newAddressableValue(stringType) + mv := newAddressableValue(m.Type().Elem()) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) + if err != nil { + return err + } + if insertUnquotedName != nil { + isVerbatim := bytes.IndexByte(b, '\\') < 0 + name := unescapeStringMayCopy(b, isVerbatim) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(b) + " in object"} + } + } + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() + } + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. 
+ mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + putStrings(names) + } + return nil + } +} + +// unmarshalInlinedFallbackNext unmarshals only the next member in an inlined fallback. +func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressableValue, f *structField, quotedName, unquotedName []byte) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + v = v.indirect(true) + + if v.Type() == rawValueType { + b := v.Addr().Interface().(*RawValue) + if len(*b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + *b = append(*b, '{') + } else { + *b = trimSuffixWhitespace(*b) + if hasSuffixByte(*b, '}') { + // TODO: When merging into an object for the first time, + // should we verify that it is valid? + *b = trimSuffixByte(*b, '}') + *b = trimSuffixWhitespace(*b) + if !hasSuffixByte(*b, ',') && !hasSuffixByte(*b, '{') { + *b = append(*b, ',') + } + } else { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "unmarshal", GoType: rawValueType, Err: err} + } + } + *b = append(*b, quotedName...) + *b = append(*b, ':') + rawValue, err := dec.ReadValue() + if err != nil { + return err + } + *b = append(*b, rawValue...) + *b = append(*b, '}') + return nil + } else { + name := string(unquotedName) // TODO: Intern this? + + m := v // must be a map[string]V + if m.IsNil() { + m.Set(reflect.MakeMap(m.Type())) + } + mk := reflect.ValueOf(name) + mv := newAddressableValue(v.Type().Elem()) // TODO: Cache across calls? + if v2 := m.MapIndex(mk); v2.IsValid() { + mv.Set(v2) + } + + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, mv.Type()) + } + err := unmarshal(uo, dec, mv) + m.SetMapIndex(mk, mv.Value) + if err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go new file mode 100644 index 000000000..20899c868 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding" + "errors" + "reflect" +) + +// Interfaces for custom serialization. +var ( + jsonMarshalerV1Type = reflect.TypeOf((*MarshalerV1)(nil)).Elem() + jsonMarshalerV2Type = reflect.TypeOf((*MarshalerV2)(nil)).Elem() + jsonUnmarshalerV1Type = reflect.TypeOf((*UnmarshalerV1)(nil)).Elem() + jsonUnmarshalerV2Type = reflect.TypeOf((*UnmarshalerV2)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// MarshalerV1 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. +// +// It is recommended that implementations return a buffer that is safe +// for the caller to retain and potentially mutate. 
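+//
+// A minimal sketch of an implementation (Distance is a hypothetical
+// integer-based type; the encoding chosen here is only an example):
+//
+//	func (d Distance) MarshalJSON() ([]byte, error) {
+//		return []byte(strconv.FormatInt(int64(d), 10)), nil
+//	}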
+type MarshalerV1 interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerV2 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 instead of MarshalerV1 +// since this is both more performant and flexible. +// If a type implements both MarshalerV1 and MarshalerV2, +// then MarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default marshal options. +// +// The implementation must write only one JSON value to the Encoder and +// must not retain the pointer to Encoder. +type MarshalerV2 interface { + MarshalNextJSON(MarshalOptions, *Encoder) error + + // TODO: Should users call the MarshalOptions.MarshalNext method or + // should/can they call this method directly? Does it matter? +} + +// UnmarshalerV1 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 unless +// the implementation is trying to avoid a hard dependency on this package. +// +// The input can be assumed to be a valid encoding of a JSON value +// if called from unmarshal functionality in this package. +// UnmarshalJSON must copy the JSON data if it is retained after returning. +// It is recommended that UnmarshalJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain or mutate the input []byte. +type UnmarshalerV1 interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerV2 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 instead of UnmarshalerV1 +// since this is both more performant and flexible. +// If a type implements both UnmarshalerV1 and UnmarshalerV2, +// then UnmarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default unmarshal options. +// +// The implementation must read only one JSON value from the Decoder. +// It is recommended that UnmarshalNextJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain the pointer to Decoder. +type UnmarshalerV2 interface { + UnmarshalNextJSON(UnmarshalOptions, *Decoder) error + + // TODO: Should users call the UnmarshalOptions.UnmarshalNext method or + // should/can they call this method directly? Does it matter? +} + +func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Avoid injecting method arshaler on the pointer or interface version + // to avoid ever calling the method on a nil pointer or interface receiver. + // Let it be injected on the value receiver (which is always addressable). + if t.Kind() == reflect.Pointer || t.Kind() == reflect.Interface { + return fncs + } + + // Handle custom marshaler. + switch which, needAddr := implementsWhich(t, jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType); which { + case jsonMarshalerV2Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(MarshalerV2).MarshalNextJSON(mo, enc) + currDepth, currLength := enc.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic or I/O errors. 
+ return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + } + case jsonMarshalerV1Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(MarshalerV1) + val, err := marshaler.MarshalJSON() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + } + case textMarshalerType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(encoding.TextMarshaler) + s, err := marshaler.MarshalText() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + val := enc.UnusedBuffer() + val, err = appendString(val, string(s), true, nil) + if err != nil { + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping syntactic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + return nil + } + } + + // Handle custom unmarshaler. + switch which, needAddr := implementsWhich(t, jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType); which { + case jsonUnmarshalerV2Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(UnmarshalerV2).UnmarshalNextJSON(uo, dec) + currDepth, currLength := dec.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + case jsonUnmarshalerV1Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + unmarshaler := va.addrWhen(needAddr).Interface().(UnmarshalerV1) + if err := unmarshaler.UnmarshalJSON(val); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + case textUnmarshalerType: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err // must be a syntactic or I/O error + } + if val.Kind() != '"' { + err = errors.New("JSON value must be string type") + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + s := unescapeStringMayCopy(val, flags.isVerbatim()) + unmarshaler := va.addrWhen(needAddr).Interface().(encoding.TextUnmarshaler) + if err := unmarshaler.UnmarshalText(s); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + } + + return fncs +} + +// implementsWhich is like t.Implements(ifaceType) for a list of interfaces, +// but checks whether either t or reflect.PointerTo(t) implements the interface. +// It returns the first interface type that matches and whether a value of t +// needs to be addressed first before it implements the interface. +func implementsWhich(t reflect.Type, ifaceTypes ...reflect.Type) (which reflect.Type, needAddr bool) { + for _, ifaceType := range ifaceTypes { + switch { + case t.Implements(ifaceType): + return ifaceType, false + case reflect.PointerTo(t).Implements(ifaceType): + return ifaceType, true + } + } + return nil, false +} + +// addrWhen returns va.Addr if addr is specified, otherwise it returns itself. +func (va addressableValue) addrWhen(addr bool) reflect.Value { + if addr { + return va.Addr() + } + return va.Value +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go new file mode 100644 index 000000000..fc8d5b007 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -0,0 +1,241 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +var ( + timeDurationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + timeTimeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Ideally, time types would implement MarshalerV2 and UnmarshalerV2, + // but that would incur a dependency on package json from package time. + // Given how widely used time is, it is more acceptable that we incur a + // dependency on time from json. + // + // Injecting the arshaling functionality like this will not be identical + // to actually declaring methods on the time types since embedding of the + // time types will not be able to forward this functionality. 
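+	//
+	// The switch below special-cases exactly two types: time.Duration is
+	// encoded as its String form (or as the underlying nanosecond count when
+	// the "nanos" format flag is in effect), while time.Time defaults to
+	// RFC 3339 and accepts the named layout constants of the time package
+	// through the format flag.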
+ switch t { + case timeDurationType: + fncs.nonDefault = true + marshalNanos := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nanos" { + mo.format = "" + return marshalNanos(mo, enc, va) + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + td := va.Interface().(time.Duration) + b := enc.UnusedBuffer() + b = append(b, '"') + b = append(b, td.String()...) // never contains special characters + b = append(b, '"') + return enc.WriteValue(b) + } + unmarshalNanos := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // TODO: Should there be a flag that specifies that we can unmarshal + // from either form since there would be no ambiguity? + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nanos" { + uo.format = "" + return unmarshalNanos(uo, dec, va) + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + var flags valueFlags + td := va.Addr().Interface().(*time.Duration) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + switch k := val.Kind(); k { + case 'n': + *td = time.Duration(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + td2, err := time.ParseDuration(string(val)) + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *td = td2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + case timeTimeType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + format := time.RFC3339Nano + isRFC3339 := true + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(mo.format) + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + } + + tt := va.Interface().(time.Time) + b := enc.UnusedBuffer() + b = append(b, '"') + b = tt.AppendFormat(b, format) + b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } + // The format may contain special characters that need escaping. + // Verify that the result is a valid JSON string (common case), + // otherwise escape the string correctly (slower case). 
+ if consumeSimpleString(b) != len(b) { + b, _ = appendString(nil, string(b[len(`"`):len(b)-len(`"`)]), true, nil) + } + return enc.WriteValue(b) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + format := time.RFC3339 + isRFC3339 := true + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(uo.format) + if err != nil { + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + } + + var flags valueFlags + tt := va.Addr().Interface().(*time.Time) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + *tt = time.Time{} + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *tt = tt2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + } + return fncs +} + +func checkTimeFormat(format string) (string, bool, error) { + // We assume that an exported constant in the time package will + // always start with an uppercase ASCII letter. 
+ if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { + switch format { + case "ANSIC": + return time.ANSIC, false, nil + case "UnixDate": + return time.UnixDate, false, nil + case "RubyDate": + return time.RubyDate, false, nil + case "RFC822": + return time.RFC822, false, nil + case "RFC822Z": + return time.RFC822Z, false, nil + case "RFC850": + return time.RFC850, false, nil + case "RFC1123": + return time.RFC1123, false, nil + case "RFC1123Z": + return time.RFC1123Z, false, nil + case "RFC3339": + return time.RFC3339, true, nil + case "RFC3339Nano": + return time.RFC3339Nano, true, nil + case "Kitchen": + return time.Kitchen, false, nil + case "Stamp": + return time.Stamp, false, nil + case "StampMilli": + return time.StampMilli, false, nil + case "StampMicro": + return time.StampMicro, false, nil + case "StampNano": + return time.StampNano, false, nil + default: + // Reject any format that is an exported Go identifier in case + // new format constants are added to the time package. + if strings.TrimFunc(format, isLetterOrDigit) == "" { + return "", false, fmt.Errorf("undefined format layout: %v", format) + } + } + } + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go new file mode 100644 index 000000000..0d68b3233 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -0,0 +1,1655 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "io" + "math" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// NOTE: The logic for decoding is complicated by the fact that reading from +// an io.Reader into a temporary buffer means that the buffer may contain a +// truncated portion of some valid input, requiring the need to fetch more data. +// +// This file is structured in the following way: +// +// - consumeXXX functions parse an exact JSON token from a []byte. +// If the buffer appears truncated, then it returns io.ErrUnexpectedEOF. +// The consumeSimpleXXX functions are so named because they only handle +// a subset of the grammar for the JSON token being parsed. +// They do not handle the full grammar to keep these functions inlineable. +// +// - Decoder.consumeXXX methods parse the next JSON token from Decoder.buf, +// automatically fetching more input if necessary. These methods take +// a position relative to the start of Decoder.buf as an argument and +// return the end of the consumed JSON token as a position, +// also relative to the start of Decoder.buf. +// +// - In the event of an I/O errors or state machine violations, +// the implementation avoids mutating the state of Decoder +// (aside from the book-keeping needed to implement Decoder.fetch). +// For this reason, only Decoder.ReadToken and Decoder.ReadValue are +// responsible for updated Decoder.prevStart and Decoder.prevEnd. 
+// +// - For performance, much of the implementation uses the pattern of calling +// the inlineable consumeXXX functions first, and if more work is necessary, +// then it calls the slower Decoder.consumeXXX methods. +// TODO: Revisit this pattern if the Go compiler provides finer control +// over exactly which calls are inlined or not. + +// DecodeOptions configures how JSON decoding operates. +// The zero value is equivalent to the default settings, +// which is compliant with both RFC 7493 and RFC 8259. +type DecodeOptions struct { + requireKeyedLiterals + nonComparable + + // AllowDuplicateNames specifies that JSON objects may contain + // duplicate member names. Disabling the duplicate name check may provide + // computational and performance benefits, but breaks compliance with + // RFC 7493, section 2.3. The input will still be compliant with RFC 8259, + // which leaves the handling of duplicate names as unspecified behavior. + AllowDuplicateNames bool + + // AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8, + // which will be mangled as the Unicode replacement character, U+FFFD. + // This causes the decoder to break compliance with + // RFC 7493, section 2.1, and RFC 8259, section 8.1. + AllowInvalidUTF8 bool +} + +// Decoder is a streaming decoder for raw JSON tokens and values. +// It is used to read a stream of top-level JSON values, +// each separated by optional whitespace characters. +// +// ReadToken and ReadValue calls may be interleaved. +// For example, the following JSON value: +// +// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}} +// +// can be parsed with the following calls (ignoring errors for brevity): +// +// d.ReadToken() // { +// d.ReadToken() // "name" +// d.ReadToken() // "value" +// d.ReadValue() // "array" +// d.ReadToken() // [ +// d.ReadToken() // null +// d.ReadToken() // false +// d.ReadValue() // true +// d.ReadToken() // 3.14159 +// d.ReadToken() // ] +// d.ReadValue() // "object" +// d.ReadValue() // {"k":"v"} +// d.ReadToken() // } +// +// The above is one of many possible sequence of calls and +// may not represent the most sensible method to call for any given token/value. +// For example, it is probably more common to call ReadToken to obtain a +// string token for object names. +type Decoder struct { + state + decodeBuffer + options DecodeOptions + + stringCache *stringCache // only used when unmarshaling +} + +// decodeBuffer is a buffer split into 4 segments: +// +// - buf[0:prevEnd] // already read portion of the buffer +// - buf[prevStart:prevEnd] // previously read value +// - buf[prevEnd:len(buf)] // unread portion of the buffer +// - buf[len(buf):cap(buf)] // unused portion of the buffer +// +// Invariants: +// +// 0 ≤ prevStart ≤ prevEnd ≤ len(buf) ≤ cap(buf) +type decodeBuffer struct { + peekPos int // non-zero if valid offset into buf for start of next token + peekErr error // implies peekPos is -1 + + buf []byte // may alias rd if it is a bytes.Buffer + prevStart int + prevEnd int + + // baseOffset is added to prevStart and prevEnd to obtain + // the absolute offset relative to the start of io.Reader stream. + baseOffset int64 + + rd io.Reader +} + +// NewDecoder constructs a new streaming decoder reading from r. +// +// If r is a bytes.Buffer, then the decoder parses directly from the buffer +// without first copying the contents to an intermediate buffer. +// Additional writes to the buffer must not occur while the decoder is in use. 
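+//
+// A minimal usage sketch (illustrative only; it assumes an enclosing
+// function that can return the error):
+//
+//	d := NewDecoder(strings.NewReader(`[1,2,3]`))
+//	for {
+//		tok, err := d.ReadToken()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = tok // inspect tok.Kind() as needed
+//	}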
+func NewDecoder(r io.Reader) *Decoder { + return DecodeOptions{}.NewDecoder(r) +} + +// NewDecoder constructs a new streaming decoder reading from r +// configured with the provided options. +func (o DecodeOptions) NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + o.ResetDecoder(d, r) + return d +} + +// ResetDecoder resets a decoder such that it is reading afresh from r and +// configured with the provided options. +func (o DecodeOptions) ResetDecoder(d *Decoder, r io.Reader) { + if d == nil { + panic("json: invalid nil Decoder") + } + if r == nil { + panic("json: invalid nil io.Reader") + } + d.reset(nil, r, o) +} + +func (d *Decoder) reset(b []byte, r io.Reader, o DecodeOptions) { + d.state.reset() + d.decodeBuffer = decodeBuffer{buf: b, rd: r} + d.options = o +} + +// Reset resets a decoder such that it is reading afresh from r but +// keep any pre-existing decoder options. +func (d *Decoder) Reset(r io.Reader) { + d.options.ResetDecoder(d, r) +} + +var errBufferWriteAfterNext = errors.New("invalid bytes.Buffer.Write call after calling bytes.Buffer.Next") + +// fetch reads at least 1 byte from the underlying io.Reader. +// It returns io.ErrUnexpectedEOF if zero bytes were read and io.EOF was seen. +func (d *Decoder) fetch() error { + if d.rd == nil { + return io.ErrUnexpectedEOF + } + + // Inform objectNameStack that we are about to fetch new buffer content. + d.names.copyQuotedBuffer(d.buf) + + // Specialize bytes.Buffer for better performance. + if bb, ok := d.rd.(*bytes.Buffer); ok { + switch { + case bb.Len() == 0: + return io.ErrUnexpectedEOF + case len(d.buf) == 0: + d.buf = bb.Next(bb.Len()) // "read" all data in the buffer + return nil + default: + // This only occurs if a partially filled bytes.Buffer was provided + // and more data is written to it while Decoder is reading from it. + // This practice will lead to data corruption since future writes + // may overwrite the contents of the current buffer. + // + // The user is trying to use a bytes.Buffer as a pipe, + // but a bytes.Buffer is poor implementation of a pipe, + // the purpose-built io.Pipe should be used instead. + return &ioError{action: "read", err: errBufferWriteAfterNext} + } + } + + // Allocate initial buffer if empty. + if cap(d.buf) == 0 { + d.buf = make([]byte, 0, 64) + } + + // Check whether to grow the buffer. + const maxBufferSize = 4 << 10 + const growthSizeFactor = 2 // higher value is faster + const growthRateFactor = 2 // higher value is slower + // By default, grow if below the maximum buffer size. + grow := cap(d.buf) <= maxBufferSize/growthSizeFactor + // Growing can be expensive, so only grow + // if a sufficient number of bytes have been processed. + grow = grow && int64(cap(d.buf)) < d.previousOffsetEnd()/growthRateFactor + // If prevStart==0, then fetch was called in order to fetch more data + // to finish consuming a large JSON value contiguously. + // Grow if less than 25% of the remaining capacity is available. + // Note that this may cause the input buffer to exceed maxBufferSize. + grow = grow || (d.prevStart == 0 && len(d.buf) >= 3*cap(d.buf)/4) + + if grow { + // Allocate a new buffer and copy the contents of the old buffer over. + // TODO: Provide a hard limit on the maximum internal buffer size? + buf := make([]byte, 0, cap(d.buf)*growthSizeFactor) + d.buf = append(buf, d.buf[d.prevStart:]...) + } else { + // Move unread portion of the data to the front. 
+ n := copy(d.buf[:cap(d.buf)], d.buf[d.prevStart:]) + d.buf = d.buf[:n] + } + d.baseOffset += int64(d.prevStart) + d.prevEnd -= d.prevStart + d.prevStart = 0 + + // Read more data into the internal buffer. + for { + n, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)]) + switch { + case n > 0: + d.buf = d.buf[:len(d.buf)+n] + return nil // ignore errors if any bytes are read + case err == io.EOF: + return io.ErrUnexpectedEOF + case err != nil: + return &ioError{action: "read", err: err} + default: + continue // Read returned (0, nil) + } + } +} + +const invalidateBufferByte = '#' // invalid starting character for JSON grammar + +// invalidatePreviousRead invalidates buffers returned by Peek and Read calls +// so that the first byte is an invalid character. +// This Hyrum-proofs the API against faulty application code that assumes +// values returned by ReadValue remain valid past subsequent Read calls. +func (d *decodeBuffer) invalidatePreviousRead() { + // Avoid mutating the buffer if d.rd is nil which implies that d.buf + // is provided by the user code and may not expect mutations. + isBytesBuffer := func(r io.Reader) bool { + _, ok := r.(*bytes.Buffer) + return ok + } + if d.rd != nil && !isBytesBuffer(d.rd) && d.prevStart < d.prevEnd && uint(d.prevStart) < uint(len(d.buf)) { + d.buf[d.prevStart] = invalidateBufferByte + d.prevStart = d.prevEnd + } +} + +// needMore reports whether there are no more unread bytes. +func (d *decodeBuffer) needMore(pos int) bool { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + return pos == len(d.buf) +} + +// injectSyntacticErrorWithPosition wraps a SyntacticError with the position, +// otherwise it returns the error as is. +// It takes a position relative to the start of the start of d.buf. +func (d *decodeBuffer) injectSyntacticErrorWithPosition(err error, pos int) error { + if serr, ok := err.(*SyntacticError); ok { + return serr.withOffset(d.baseOffset + int64(pos)) + } + return err +} + +func (d *decodeBuffer) previousOffsetStart() int64 { return d.baseOffset + int64(d.prevStart) } +func (d *decodeBuffer) previousOffsetEnd() int64 { return d.baseOffset + int64(d.prevEnd) } +func (d *decodeBuffer) previousBuffer() []byte { return d.buf[d.prevStart:d.prevEnd] } +func (d *decodeBuffer) unreadBuffer() []byte { return d.buf[d.prevEnd:len(d.buf)] } + +// PeekKind retrieves the next token kind, but does not advance the read offset. +// It returns 0 if there are no more tokens. +func (d *Decoder) PeekKind() Kind { + // Check whether we have a cached peek result. + if d.peekPos > 0 { + return Kind(d.buf[d.peekPos]).normalize() + } + + var err error + d.invalidatePreviousRead() + pos := d.prevEnd + + // Consume leading whitespace. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + d.peekPos, d.peekErr = -1, err + return invalidKind + } + } + + // Consume colon or comma. 
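+	// A ':' is only valid after an object name and a ',' only between
+	// members or elements; the check against d.tokens.needDelim below
+	// verifies that the delimiter seen (possibly none) is the one the
+	// upcoming token kind requires.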
+ var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + d.peekPos, d.peekErr = -1, err + return invalidKind + } + } + } + next := Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + err = d.injectSyntacticErrorWithPosition(err, pos) + d.peekPos, d.peekErr = -1, err + return invalidKind + } + + // This may set peekPos to zero, which is indistinguishable from + // the uninitialized state. While a small hit to performance, it is correct + // since ReadValue and ReadToken will disregard the cached result and + // recompute the next kind. + d.peekPos, d.peekErr = pos, nil + return next +} + +// SkipValue is semantically equivalent to calling ReadValue and discarding +// the result except that memory is not wasted trying to hold the entire result. +func (d *Decoder) SkipValue() error { + switch d.PeekKind() { + case '{', '[': + // For JSON objects and arrays, keep skipping all tokens + // until the depth matches the starting depth. + depth := d.tokens.depth() + for { + if _, err := d.ReadToken(); err != nil { + return err + } + if depth >= d.tokens.depth() { + return nil + } + } + default: + // Trying to skip a value when the next token is a '}' or ']' + // will result in an error being returned here. + if _, err := d.ReadValue(); err != nil { + return err + } + return nil + } +} + +// ReadToken reads the next Token, advancing the read offset. +// The returned token is only valid until the next Peek, Read, or Skip call. +// It returns io.EOF if there are no more tokens. +func (d *Decoder) ReadToken() (Token, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return Token{}, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return Token{}, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return Token{}, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next token. 
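+	// Literals ('n', 'f', 't') are validated in place; strings and numbers
+	// record the raw token span in d.buf so the returned Token can refer to
+	// it; '{', '}', '[', ']' update the token state machine, with objects
+	// also pushing or popping the name and namespace stacks used for
+	// duplicate name detection.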
+ var n int + switch next { + case 'n': + if consumeNull(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "null") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("null") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("null")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return Null, nil + + case 'f': + if consumeFalse(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "false") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("false") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("false")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return False, nil + + case 't': + if consumeTrue(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "true") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("true") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("true")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return True, nil + + case '"': + var flags valueFlags // TODO: Preserve this in Token? + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + return Token{}, errInvalidNamespace + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + if err = d.tokens.appendString(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
+ if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeNumber(pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if err = d.tokens.appendNumber(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of number + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '{': + if err = d.tokens.pushObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.push() + d.namespaces.push() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectStart, nil + + case '}': + if err = d.tokens.popObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.pop() + d.namespaces.pop() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectEnd, nil + + case '[': + if err = d.tokens.pushArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayStart, nil + + case ']': + if err = d.tokens.popArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayEnd, nil + + default: + err = newInvalidCharacterError(d.buf[pos:], "at start of token") + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } +} + +type valueFlags uint + +const ( + _ valueFlags = (1 << iota) / 2 // powers of two starting with zero + + stringNonVerbatim // string cannot be naively treated as valid UTF-8 + stringNonCanonical // string not formatted according to RFC 8785, section 3.2.2.2. + // TODO: Track whether a number is a non-integer? +) + +func (f *valueFlags) set(f2 valueFlags) { *f |= f2 } +func (f valueFlags) isVerbatim() bool { return f&stringNonVerbatim == 0 } +func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } + +// ReadValue returns the next raw JSON value, advancing the read offset. +// The value is stripped of any leading or trailing whitespace. +// The returned value is only valid until the next Peek, Read, or Skip call and +// may not be mutated while the Decoder remains in use. +// If the decoder is currently at the end token for an object or array, +// then it reports a SyntacticError and the internal state remains unchanged. +// It returns io.EOF if there are no more values. +func (d *Decoder) ReadValue() (RawValue, error) { + var flags valueFlags + return d.readValue(&flags) +} +func (d *Decoder) readValue(flags *valueFlags) (RawValue, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return nil, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return nil, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return nil, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next value. + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeValue(flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n := int(newAbsPos - oldAbsPos) + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + switch next { + case 'n', 't', 'f': + err = d.tokens.appendLiteral() + case '"': + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + break + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + err = d.tokens.appendString() + case '0': + err = d.tokens.appendNumber() + case '{': + if err = d.tokens.pushObject(); err != nil { + break + } + if err = d.tokens.popObject(); err != nil { + panic("BUG: popObject should never fail immediately after pushObject: " + err.Error()) + } + case '[': + if err = d.tokens.pushArray(); err != nil { + break + } + if err = d.tokens.popArray(); err != nil { + panic("BUG: popArray should never fail immediately after pushArray: " + err.Error()) + } + } + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of value + } + d.prevEnd = pos + d.prevStart = pos - n + return d.buf[pos-n : pos : pos], nil +} + +// checkEOF verifies that the input has no more data. +func (d *Decoder) checkEOF() error { + switch pos, err := d.consumeWhitespace(d.prevEnd); err { + case nil: + return newInvalidCharacterError(d.buf[pos:], "after top-level value") + case io.ErrUnexpectedEOF: + return nil + default: + return err + } +} + +// consumeWhitespace consumes all whitespace starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the last whitespace. +// If it returns nil, there is guaranteed to at least be one unread byte. +// +// The following pattern is common in this implementation: +// +// pos += consumeWhitespace(d.buf[pos:]) +// if d.needMore(pos) { +// if pos, err = d.consumeWhitespace(pos); err != nil { +// return ... +// } +// } +// +// It is difficult to simplify this without sacrificing performance since +// consumeWhitespace must be inlined. The body of the if statement is +// executed only in rare situations where we need to fetch more data. +// Since fetching may return an error, we also need to check the error. 
+func (d *Decoder) consumeWhitespace(pos int) (newPos int, err error) { + for { + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos, nil + } +} + +// consumeValue consumes a single JSON value starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the value. +func (d *Decoder) consumeValue(flags *valueFlags, pos int) (newPos int, err error) { + for { + var n int + var err error + switch next := Kind(d.buf[pos]).normalize(); next { + case 'n': + if n = consumeNull(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "null") + } + case 'f': + if n = consumeFalse(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "false") + } + case 't': + if n = consumeTrue(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "true") + } + case '"': + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + return d.consumeString(flags, pos) + } + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. + if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + return d.consumeNumber(pos) + } + case '{': + return d.consumeObject(flags, pos) + case '[': + return d.consumeArray(flags, pos) + default: + return pos, newInvalidCharacterError(d.buf[pos:], "at start of value") + } + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeLiteral consumes a single JSON literal starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the literal. +func (d *Decoder) consumeLiteral(pos int, lit string) (newPos int, err error) { + for { + n, err := consumeLiteral(d.buf[pos:], lit) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeString consumes a single JSON string starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the string. +func (d *Decoder) consumeString(flags *valueFlags, pos int) (newPos int, err error) { + var n int + for { + n, err = consumeStringResumable(flags, d.buf[pos:], n, !d.options.AllowInvalidUTF8) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeNumber consumes a single JSON number starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the number. +func (d *Decoder) consumeNumber(pos int) (newPos int, err error) { + var n int + var state consumeNumberState + for { + n, state, err = consumeNumberResumable(d.buf[pos:], n, state) + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
+ if err == io.ErrUnexpectedEOF || d.needMore(pos+n) { + mayTerminate := err == nil + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + if mayTerminate && err == io.ErrUnexpectedEOF { + return pos + n, nil + } + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeObject consumes a single JSON object starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the object. +func (d *Decoder) consumeObject(flags *valueFlags, pos int) (newPos int, err error) { + var n int + var names *objectNamespace + if !d.options.AllowDuplicateNames { + d.namespaces.push() + defer d.namespaces.pop() + names = d.namespaces.last() + } + + // Handle before start. + if d.buf[pos] != '{' { + panic("BUG: consumeObject must be called with a buffer that starts with '{'") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == '}' { + pos++ + return pos, nil + } + + for { + // Handle before name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + var flags2 valueFlags + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags2, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + flags.set(flags2) + if err != nil { + return pos, err + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && !names.insertQuoted(d.buf[pos-n:pos], flags2.isVerbatim()) { + return pos - n, &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + } + + // Handle after name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] != ':' { + return pos, newInvalidCharacterError(d.buf[pos:], "after object name (expecting ':')") + } + pos++ + + // Handle before value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case '}': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after object value (expecting ',' or '}')") + } + } +} + +// consumeArray consumes a single JSON array starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the array. +func (d *Decoder) consumeArray(flags *valueFlags, pos int) (newPos int, err error) { + // Handle before start. + if d.buf[pos] != '[' { + panic("BUG: consumeArray must be called with a buffer that starts with '['") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == ']' { + pos++ + return pos, nil + } + + for { + // Handle before value. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case ']': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after array value (expecting ',' or ']')") + } + } +} + +// InputOffset returns the current input byte offset. It gives the location +// of the next byte immediately after the most recently returned token or value. +// The number of bytes actually read from the underlying io.Reader may be more +// than this offset due to internal buffering effects. +func (d *Decoder) InputOffset() int64 { + return d.previousOffsetEnd() +} + +// UnreadBuffer returns the data remaining in the unread buffer, +// which may contain zero or more bytes. +// The returned buffer must not be mutated while Decoder continues to be used. +// The buffer contents are valid until the next Peek, Read, or Skip call. +func (d *Decoder) UnreadBuffer() []byte { + return d.unreadBuffer() +} + +// StackDepth returns the depth of the state machine for read JSON data. +// Each level on the stack represents a nested JSON object or array. +// It is incremented whenever an ObjectStart or ArrayStart token is encountered +// and decremented whenever an ObjectEnd or ArrayEnd token is encountered. +// The depth is zero-indexed, where zero represents the top-level JSON value. +func (d *Decoder) StackDepth() int { + // NOTE: Keep in sync with Encoder.StackDepth. + return d.tokens.depth() - 1 +} + +// StackIndex returns information about the specified stack level. +// It must be a number between 0 and StackDepth, inclusive. +// For each level, it reports the kind: +// +// - 0 for a level of zero, +// - '{' for a level representing a JSON object, and +// - '[' for a level representing a JSON array. +// +// It also reports the length of that JSON object or array. +// Each name and value in a JSON object is counted separately, +// so the effective number of members would be half the length. +// A complete JSON object must have an even length. +func (d *Decoder) StackIndex(i int) (Kind, int) { + // NOTE: Keep in sync with Encoder.StackIndex. + switch s := d.tokens.index(i); { + case i > 0 && s.isObject(): + return '{', s.length() + case i > 0 && s.isArray(): + return '[', s.length() + default: + return 0, s.length() + } +} + +// StackPointer returns a JSON Pointer (RFC 6901) to the most recently read value. +// Object names are only present if AllowDuplicateNames is false, otherwise +// object members are represented using their index within the object. +func (d *Decoder) StackPointer() string { + d.names.copyQuotedBuffer(d.buf) + return string(d.appendStackPointer(nil)) +} + +// consumeWhitespace consumes leading JSON whitespace per RFC 7159, section 2. +func consumeWhitespace(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + for len(b) > n && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n++ + } + return n +} + +// consumeNull consumes the next JSON null literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. 
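The fast-path/slow-path pairing that the comment above describes for the literal helpers can be sketched independently of the decoder. The names below are illustrative stand-ins, not the vendored functions, but the contract is the same: the fast check only recognizes a complete literal, while the fallback distinguishes a truncated prefix (io.ErrUnexpectedEOF, so the caller can fetch more input) from a genuine mismatch.

package main

import (
	"fmt"
	"io"
)

// fastNull reports the length of a complete "null" literal, or 0 otherwise.
func fastNull(b []byte) int {
	const lit = "null"
	if len(b) >= len(lit) && string(b[:len(lit)]) == lit {
		return len(lit)
	}
	return 0
}

// slowLiteral matches b against lit byte by byte. A short input that is still
// a valid prefix yields io.ErrUnexpectedEOF; a wrong byte is a syntax error.
func slowLiteral(b []byte, lit string) (int, error) {
	for i := 0; i < len(b) && i < len(lit); i++ {
		if b[i] != lit[i] {
			return i, fmt.Errorf("invalid character %q within literal %s", b[i], lit)
		}
	}
	if len(b) < len(lit) {
		return len(b), io.ErrUnexpectedEOF
	}
	return len(lit), nil
}

func main() {
	for _, in := range []string{"null,", "nu", "nope"} {
		if n := fastNull([]byte(in)); n > 0 {
			fmt.Printf("%q -> fast path, consumed %d\n", in, n)
			continue
		}
		n, err := slowLiteral([]byte(in), "null")
		fmt.Printf("%q -> slow path, consumed %d, err: %v\n", in, n, err)
	}
}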
+func consumeNull(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "null" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeFalse consumes the next JSON false literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeFalse(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "false" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeTrue consumes the next JSON true literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeTrue(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "true" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeLiteral consumes the next JSON literal per RFC 7159, section 3. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeLiteral(b []byte, lit string) (n int, err error) { + for i := 0; i < len(b) && i < len(lit); i++ { + if b[i] != lit[i] { + return i, newInvalidCharacterError(b[i:], "within literal "+lit+" (expecting "+strconv.QuoteRune(rune(lit[i]))+")") + } + } + if len(b) < len(lit) { + return len(b), io.ErrUnexpectedEOF + } + return len(lit), nil +} + +// consumeSimpleString consumes the next JSON string per RFC 7159, section 7 +// but is limited to the grammar for an ASCII string without escape sequences. +// It returns 0 if it is invalid or more complicated than a simple string, +// in which case consumeString should be called. +func consumeSimpleString(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[0] == '"' { + n++ + for len(b) > n && (' ' <= b[n] && b[n] != '\\' && b[n] != '"' && b[n] < utf8.RuneSelf) { + n++ + } + if len(b) > n && b[n] == '"' { + n++ + return n + } + } + return 0 +} + +// consumeString consumes the next JSON string per RFC 7159, section 7. +// If validateUTF8 is false, then this allows the presence of invalid UTF-8 +// characters within the string itself. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeString(flags *valueFlags, b []byte, validateUTF8 bool) (n int, err error) { + return consumeStringResumable(flags, b, 0, validateUTF8) +} + +// consumeStringResumable is identical to consumeString but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, validateUTF8 bool) (n int, err error) { + // Consume the leading double quote. + switch { + case resumeOffset > 0: + n = resumeOffset // already handled the leading quote + case uint(len(b)) == 0: + return n, io.ErrUnexpectedEOF + case b[0] == '"': + n++ + default: + return n, newInvalidCharacterError(b[n:], `at start of string (expecting '"')`) + } + + // Consume every character in the string. + for uint(len(b)) > uint(n) { + // Optimize for long sequences of unescaped characters. 
+ noEscape := func(c byte) bool { + return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"' + } + for uint(len(b)) > uint(n) && noEscape(b[n]) { + n++ + } + if uint(len(b)) <= uint(n) { + return n, io.ErrUnexpectedEOF + } + + // Check for terminating double quote. + if b[n] == '"' { + n++ + return n, nil + } + + switch r, rn := utf8.DecodeRune(b[n:]); { + // Handle UTF-8 encoded byte sequence. + // Due to specialized handling of ASCII above, we know that + // all normal sequences at this point must be 2 bytes or larger. + case rn > 1: + n += rn + // Handle escape sequence. + case r == '\\': + flags.set(stringNonVerbatim) + resumeOffset = n + if uint(len(b)) < uint(n+2) { + return resumeOffset, io.ErrUnexpectedEOF + } + switch r := b[n+1]; r { + case '/': + // Forward slash is the only character with 3 representations. + // Per RFC 8785, section 3.2.2.2., this must not be escaped. + flags.set(stringNonCanonical) + n += 2 + case '"', '\\', 'b', 'f', 'n', 'r', 't': + n += 2 + case 'u': + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v1, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + // Only certain control characters can use the \uFFFF notation + // for canonical formatting (per RFC 8785, section 3.2.2.2.). + switch v1 { + // \uFFFF notation not permitted for these characters. + case '\b', '\f', '\n', '\r', '\t': + flags.set(stringNonCanonical) + default: + // \uFFFF notation only permitted for control characters. + if v1 >= ' ' { + flags.set(stringNonCanonical) + } else { + // \uFFFF notation must be lower case. + for _, c := range b[n+2 : n+6] { + if 'A' <= c && c <= 'F' { + flags.set(stringNonCanonical) + } + } + } + } + n += 6 + + if validateUTF8 && utf16.IsSurrogate(rune(v1)) { + if uint(len(b)) >= uint(n+2) && (b[n] != '\\' || b[n+1] != 'u') { + return n, &SyntacticError{str: "invalid unpaired surrogate half within string"} + } + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v2, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + if utf16.DecodeRune(rune(v1), rune(v2)) == utf8.RuneError { + return n, &SyntacticError{str: "invalid surrogate pair in string"} + } + n += 6 + } + default: + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+2])) + " within string"} + } + // Handle invalid UTF-8. + case r == utf8.RuneError: + if !utf8.FullRune(b[n:]) { + return n, io.ErrUnexpectedEOF + } + flags.set(stringNonVerbatim | stringNonCanonical) + if validateUTF8 { + return n, &SyntacticError{str: "invalid UTF-8 within string"} + } + n++ + // Handle invalid control characters. 
+ case r < ' ': + flags.set(stringNonVerbatim | stringNonCanonical) + return n, newInvalidCharacterError(b[n:], "within string (expecting non-control character)") + default: + panic("BUG: unhandled character " + quoteRune(b[n:])) + } + } + return n, io.ErrUnexpectedEOF +} + +// hasEscapeSequencePrefix reports whether b is possibly +// the truncated prefix of a \uFFFF escape sequence. +func hasEscapeSequencePrefix(b []byte) bool { + for i, c := range b { + switch { + case i == 0 && c != '\\': + return false + case i == 1 && c != 'u': + return false + case i >= 2 && i < 6 && !('0' <= c && c <= '9') && !('a' <= c && c <= 'f') && !('A' <= c && c <= 'F'): + return false + } + } + return true +} + +// unescapeString appends the unescaped form of a JSON string in src to dst. +// Any invalid UTF-8 within the string will be replaced with utf8.RuneError. +// The input must be an entire JSON string with no surrounding whitespace. +func unescapeString(dst, src []byte) (v []byte, ok bool) { + // Consume leading double quote. + if uint(len(src)) == 0 || src[0] != '"' { + return dst, false + } + i, n := 1, 1 + + // Consume every character until completion. + for uint(len(src)) > uint(n) { + // Optimize for long sequences of unescaped characters. + noEscape := func(c byte) bool { + return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"' + } + for uint(len(src)) > uint(n) && noEscape(src[n]) { + n++ + } + if uint(len(src)) <= uint(n) { + break + } + + // Check for terminating double quote. + if src[n] == '"' { + dst = append(dst, src[i:n]...) + n++ + return dst, len(src) == n + } + + switch r, rn := utf8.DecodeRune(src[n:]); { + // Handle UTF-8 encoded byte sequence. + // Due to specialized handling of ASCII above, we know that + // all normal sequences at this point must be 2 bytes or larger. + case rn > 1: + n += rn + // Handle escape sequence. + case r == '\\': + dst = append(dst, src[i:n]...) + if r < ' ' { + return dst, false // invalid control character or unescaped quote + } + + // Handle escape sequence. + if uint(len(src)) < uint(n+2) { + return dst, false // truncated escape sequence + } + switch r := src[n+1]; r { + case '"', '\\', '/': + dst = append(dst, r) + n += 2 + case 'b': + dst = append(dst, '\b') + n += 2 + case 'f': + dst = append(dst, '\f') + n += 2 + case 'n': + dst = append(dst, '\n') + n += 2 + case 'r': + dst = append(dst, '\r') + n += 2 + case 't': + dst = append(dst, '\t') + n += 2 + case 'u': + if uint(len(src)) < uint(n+6) { + return dst, false // truncated escape sequence + } + v1, ok := parseHexUint16(src[n+2 : n+6]) + if !ok { + return dst, false // invalid escape sequence + } + n += 6 + + // Check whether this is a surrogate half. + r := rune(v1) + if utf16.IsSurrogate(r) { + r = utf8.RuneError // assume failure unless the following succeeds + if uint(len(src)) >= uint(n+6) && src[n+0] == '\\' && src[n+1] == 'u' { + if v2, ok := parseHexUint16(src[n+2 : n+6]); ok { + if r = utf16.DecodeRune(rune(v1), rune(v2)); r != utf8.RuneError { + n += 6 + } + } + } + } + + dst = utf8.AppendRune(dst, r) + default: + return dst, false // invalid escape sequence + } + i = n + // Handle invalid UTF-8. + case r == utf8.RuneError: + // NOTE: An unescaped string may be longer than the escaped string + // because invalid UTF-8 bytes are being replaced. + dst = append(dst, src[i:n]...) + dst = append(dst, "\uFFFD"...) + n += rn + i = n + // Handle invalid control characters. + case r < ' ': + dst = append(dst, src[i:n]...) 
+ return dst, false // invalid control character or unescaped quote + default: + panic("BUG: unhandled character " + quoteRune(src[n:])) + } + } + dst = append(dst, src[i:n]...) + return dst, false // truncated input +} + +// unescapeStringMayCopy returns the unescaped form of b. +// If there are no escaped characters, the output is simply a subslice of +// the input with the surrounding quotes removed. +// Otherwise, a new buffer is allocated for the output. +func unescapeStringMayCopy(b []byte, isVerbatim bool) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if isVerbatim { + return b[len(`"`) : len(b)-len(`"`)] + } + b, _ = unescapeString(make([]byte, 0, len(b)), b) + return b +} + +// consumeSimpleNumber consumes the next JSON number per RFC 7159, section 6 +// but is limited to the grammar for a positive integer. +// It returns 0 if it is invalid or more complicated than a simple integer, +// in which case consumeNumber should be called. +func consumeSimpleNumber(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 { + if b[0] == '0' { + n++ + } else if '1' <= b[0] && b[0] <= '9' { + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + } else { + return 0 + } + if len(b) == n || !(b[n] == '.' || b[n] == 'e' || b[n] == 'E') { + return n + } + } + return 0 +} + +type consumeNumberState uint + +const ( + consumeNumberInit consumeNumberState = iota + beforeIntegerDigits + withinIntegerDigits + beforeFractionalDigits + withinFractionalDigits + beforeExponentDigits + withinExponentDigits +) + +// consumeNumber consumes the next JSON number per RFC 7159, section 6. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +// +// Note that JSON numbers are not self-terminating. +// If the entire input is consumed, then the caller needs to consider whether +// there may be subsequent unread data that may still be part of this number. +func consumeNumber(b []byte) (n int, err error) { + n, _, err = consumeNumberResumable(b, 0, consumeNumberInit) + return n, err +} + +// consumeNumberResumable is identical to consumeNumber but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeNumberResumable(b []byte, resumeOffset int, state consumeNumberState) (n int, _ consumeNumberState, err error) { + // Jump to the right state when resuming from a partial consumption. + n = resumeOffset + if state > consumeNumberInit { + switch state { + case withinIntegerDigits, withinFractionalDigits, withinExponentDigits: + // Consume leading digits. + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + if len(b) == n { + return n, state, nil // still within the same state + } + state++ // switches "withinX" to "beforeY" where Y is the state after X + } + switch state { + case beforeIntegerDigits: + goto beforeInteger + case beforeFractionalDigits: + goto beforeFractional + case beforeExponentDigits: + goto beforeExponent + default: + return n, state, nil + } + } + + // Consume required integer component (with optional minus sign). 
+beforeInteger: + resumeOffset = n + if len(b) > 0 && b[0] == '-' { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeIntegerDigits, io.ErrUnexpectedEOF + case b[n] == '0': + n++ + state = beforeFractionalDigits + case '1' <= b[n] && b[n] <= '9': + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinIntegerDigits + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + + // Consume optional fractional component. +beforeFractional: + if len(b) > n && b[n] == '.' { + resumeOffset = n + n++ + switch { + case len(b) == n: + return resumeOffset, beforeFractionalDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinFractionalDigits + } + + // Consume optional exponent component. +beforeExponent: + if len(b) > n && (b[n] == 'e' || b[n] == 'E') { + resumeOffset = n + n++ + if len(b) > n && (b[n] == '-' || b[n] == '+') { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeExponentDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinExponentDigits + } + + return n, state, nil +} + +// parseHexUint16 is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-16. +// See https://go.dev/issue/42429. +func parseHexUint16(b []byte) (v uint16, ok bool) { + if len(b) != 4 { + return 0, false + } + for _, c := range b[:4] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = 10 + c - 'a' + case 'A' <= c && c <= 'F': + c = 10 + c - 'A' + default: + return 0, false + } + v = v*16 + uint16(c) + } + return v, true +} + +// parseDecUint is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-10. +// If the number is syntactically valid but overflows uint64, +// then it returns (math.MaxUint64, false). +// See https://go.dev/issue/42429. +func parseDecUint(b []byte) (v uint64, ok bool) { + // Overflow logic is based on strconv/atoi.go:138-149 from Go1.15, where: + // - cutoff is equal to math.MaxUint64/10+1, and + // - the n1 > maxVal check is unnecessary + // since maxVal is equivalent to math.MaxUint64. + var n int + var overflow bool + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + overflow = overflow || v >= math.MaxUint64/10+1 + v *= 10 + + v1 := v + uint64(b[n]-'0') + overflow = overflow || v1 < v + v = v1 + + n++ + } + if n == 0 || len(b) != n { + return 0, false + } + if overflow { + return math.MaxUint64, false + } + return v, true +} + +// parseFloat parses a floating point number according to the Go float grammar. +// Note that the JSON number grammar is a strict subset. +// +// If the number overflows the finite representation of a float, +// then we return MaxFloat since any finite value will always be infinitely +// more accurate at representing another finite value than an infinite value. +func parseFloat(b []byte, bits int) (v float64, ok bool) { + // Fast path for exact integer numbers which fit in the + // 24-bit or 53-bit significand of a float32 or float64. 
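The cutoffs in the fast-path comment above come directly from IEEE 754 significand widths. The standalone check below (not part of the decoder) illustrates why every integer up to 1<<24 (float32) or 1<<53 (float64) converts exactly, while the very next integer already rounds.

package main

import "fmt"

func main() {
	// float64 has a 53-bit significand: 1<<53 converts exactly, 1<<53 + 1 does not.
	var max53 uint64 = 1 << 53
	fmt.Println(uint64(float64(max53)) == max53)     // true
	fmt.Println(uint64(float64(max53+1)) == max53+1) // false: rounds back down to 1<<53

	// float32 has a 24-bit significand: 1<<24 converts exactly, 1<<24 + 1 does not.
	var max24 uint32 = 1 << 24
	fmt.Println(uint32(float32(max24)) == max24)     // true
	fmt.Println(uint32(float32(max24+1)) == max24+1) // false: rounds back down to 1<<24
}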
+ var negLen int // either 0 or 1 + if len(b) > 0 && b[0] == '-' { + negLen = 1 + } + u, ok := parseDecUint(b[negLen:]) + if ok && ((bits == 32 && u <= 1<<24) || (bits == 64 && u <= 1<<53)) { + return math.Copysign(float64(u), float64(-1*negLen)), true + } + + // Note that the []byte->string conversion unfortunately allocates. + // See https://go.dev/issue/42429 for more information. + fv, err := strconv.ParseFloat(string(b), bits) + if math.IsInf(fv, 0) { + switch { + case bits == 32 && math.IsInf(fv, +1): + return +math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, +1): + return +math.MaxFloat64, true + case bits == 32 && math.IsInf(fv, -1): + return -math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, -1): + return -math.MaxFloat64, true + } + } + return fv, err == nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go new file mode 100644 index 000000000..e4eefa3de --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -0,0 +1,182 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements serialization of JSON +// as specified in RFC 4627, RFC 7159, RFC 7493, RFC 8259, and RFC 8785. +// JSON is a simple data interchange format that can represent +// primitive data types such as booleans, strings, and numbers, +// in addition to structured data types such as objects and arrays. +// +// # Terminology +// +// This package uses the terms "encode" and "decode" for syntactic functionality +// that is concerned with processing JSON based on its grammar, and +// uses the terms "marshal" and "unmarshal" for semantic functionality +// that determines the meaning of JSON values as Go values and vice-versa. +// It aims to provide a clear distinction between functionality that +// is purely concerned with encoding versus that of marshaling. +// For example, one can directly encode a stream of JSON tokens without +// needing to marshal a concrete Go value representing them. +// Similarly, one can decode a stream of JSON tokens without +// needing to unmarshal them into a concrete Go value. +// +// This package uses JSON terminology when discussing JSON, which may differ +// from related concepts in Go or elsewhere in computing literature. +// +// - A JSON "object" refers to an unordered collection of name/value members. +// - A JSON "array" refers to an ordered sequence of elements. +// - A JSON "value" refers to either a literal (i.e., null, false, or true), +// string, number, object, or array. +// +// See RFC 8259 for more information. +// +// # Specifications +// +// Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, +// and RFC 8785. Each RFC is generally a stricter subset of another RFC. +// In increasing order of strictness: +// +// - RFC 4627 and RFC 7159 do not require (but recommend) the use of UTF-8 +// and also do not require (but recommend) that object names be unique. +// - RFC 8259 requires the use of UTF-8, +// but does not require (but recommends) that object names be unique. +// - RFC 7493 requires the use of UTF-8 +// and also requires that object names be unique. +// - RFC 8785 defines a canonical representation. It requires the use of UTF-8 +// and also requires that object names be unique and in a specific ordering. 
+// It specifies exactly how strings and numbers must be formatted. +// +// The primary difference between RFC 4627 and RFC 7159 is that the former +// restricted top-level values to only JSON objects and arrays, while +// RFC 7159 and subsequent RFCs permit top-level values to additionally be +// JSON nulls, booleans, strings, or numbers. +// +// By default, this package operates on RFC 7493, but can be configured +// to operate according to the other RFC specifications. +// RFC 7493 is a stricter subset of RFC 8259 and fully compliant with it. +// In particular, it makes specific choices about behavior that RFC 8259 +// leaves as undefined in order to ensure greater interoperability. +// +// # JSON Representation of Go structs +// +// A Go struct is naturally represented as a JSON object, +// where each Go struct field corresponds with a JSON object member. +// When marshaling, all Go struct fields are recursively encoded in depth-first +// order as JSON object members except those that are ignored or omitted. +// When unmarshaling, JSON object members are recursively decoded +// into the corresponding Go struct fields. +// Object members that do not match any struct fields, +// also known as “unknown members”, are ignored by default or rejected +// if UnmarshalOptions.RejectUnknownMembers is specified. +// +// The representation of each struct field can be customized in the +// "json" struct field tag, where the tag is a comma separated list of options. +// As a special case, if the entire tag is `json:"-"`, +// then the field is ignored with regard to its JSON representation. +// +// The first option is the JSON object name override for the Go struct field. +// If the name is not specified, then the Go struct field name +// is used as the JSON object name. JSON names containing commas or quotes, +// or names identical to "" or "-", can be specified using +// a single-quoted string literal, where the syntax is identical to +// the Go grammar for a double-quoted string literal, +// but instead uses single quotes as the delimiters. +// By default, unmarshaling uses case-sensitive matching to identify +// the Go struct field associated with a JSON object name. +// +// After the name, the following tag options are supported: +// +// - omitzero: When marshaling, the "omitzero" option specifies that +// the struct field should be omitted if the field value is zero +// as determined by the "IsZero() bool" method if present, +// otherwise based on whether the field is the zero Go value. +// This option has no effect when unmarshaling. +// +// - omitempty: When marshaling, the "omitempty" option specifies that +// the struct field should be omitted if the field value would have been +// encoded as a JSON null, empty string, empty object, or empty array. +// This option has no effect when unmarshaling. +// +// - string: The "string" option specifies that +// MarshalOptions.StringifyNumbers and UnmarshalOptions.StringifyNumbers +// be set when marshaling or unmarshaling a struct field value. +// This causes numeric types to be encoded as a JSON number +// within a JSON string, and to be decoded from either a JSON number or +// a JSON string containing a JSON number. +// This extra level of encoding is often necessary since +// many JSON parsers cannot precisely represent 64-bit integers. 
+// +// - nocase: When unmarshaling, the "nocase" option specifies that +// if the JSON object name does not exactly match the JSON name +// for any of the struct fields, then it attempts to match the struct field +// using a case-insensitive match that also ignores dashes and underscores. +// If multiple fields match, the first declared field in breadth-first order +// takes precedence. This option has no effect when marshaling. +// +// - inline: The "inline" option specifies that +// the JSON representable content of this field type is to be promoted +// as if they were specified in the parent struct. +// It is the JSON equivalent of Go struct embedding. +// A Go embedded field is implicitly inlined unless an explicit JSON name +// is specified. The inlined field must be a Go struct +// (that does not implement any JSON methods), RawValue, map[string]T, +// or an unnamed pointer to such types. When marshaling, +// inlined fields from a pointer type are omitted if it is nil. +// Inlined fields of type RawValue and map[string]T are called +// “inlined fallbacks” as they can represent all possible +// JSON object members not directly handled by the parent struct. +// Only one inlined fallback field may be specified in a struct, +// while many non-fallback fields may be specified. This option +// must not be specified with any other option (including the JSON name). +// +// - unknown: The "unknown" option is a specialized variant +// of the inlined fallback to indicate that this Go struct field +// contains any number of unknown JSON object members. The field type +// must be a RawValue, map[string]T, or an unnamed pointer to such types. +// If MarshalOptions.DiscardUnknownMembers is specified when marshaling, +// the contents of this field are ignored. +// If UnmarshalOptions.RejectUnknownMembers is specified when unmarshaling, +// any unknown object members are rejected regardless of whether +// an inlined fallback with the "unknown" option exists. This option +// must not be specified with any other option (including the JSON name). +// +// - format: The "format" option specifies a format flag +// used to specialize the formatting of the field value. +// The option is a key-value pair specified as "format:value" where +// the value must be either a literal consisting of letters and numbers +// (e.g., "format:RFC3339") or a single-quoted string literal +// (e.g., "format:'2006-01-02'"). The interpretation of the format flag +// is determined by the struct field type. +// +// The "omitzero" and "omitempty" options are mostly semantically identical. +// The former is defined in terms of the Go type system, +// while the latter in terms of the JSON type system. +// Consequently they behave differently in some circumstances. +// For example, only a nil slice or map is omitted under "omitzero", while +// an empty slice or map is omitted under "omitempty" regardless of nilness. +// The "omitzero" option is useful for types with a well-defined zero value +// (e.g., netip.Addr) or have an IsZero method (e.g., time.Time). +// +// Every Go struct corresponds to a list of JSON representable fields +// which is constructed by performing a breadth-first search over +// all struct fields (excluding unexported or ignored fields), +// where the search recursively descends into inlined structs. +// The set of non-inlined fields in a struct must have unique JSON names. 
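The tag options listed above are easiest to see on a concrete type. The struct below is a sketch of the tag syntax only: the field names are invented, the combination of options follows the descriptions above rather than verified behavior, and the type is not round-tripped through the vendored package (which lives under an internal path).

package main

import "time"

// Metadata is inlined into Event below; its members are promoted into the
// parent JSON object, the JSON analogue of Go struct embedding.
type Metadata struct {
	Labels map[string]string `json:"labels,omitempty"`
}

// Event sketches the struct tag options described above.
type Event struct {
	// Renamed member, omitted when it would encode as an empty JSON string.
	Name string `json:"name,omitempty"`

	// Omitted when Created.IsZero() reports true; the format flag name
	// follows the "format:RFC3339" example given above.
	Created time.Time `json:"created,omitzero,format:RFC3339"`

	// Encoded as a JSON number inside a JSON string (e.g. "42") so that
	// 64-bit values survive parsers without 64-bit integers.
	Sequence int64 `json:"sequence,string"`

	// Matched case-insensitively (ignoring dashes and underscores) when unmarshaling.
	Region string `json:"region,nocase"`

	// Explicitly inlined embedded struct; "inline" must not be combined
	// with a JSON name or any other option.
	Metadata `json:",inline"`

	// Inlined fallback capturing unknown object members.
	Extra map[string]any `json:",unknown"`

	// Ignored entirely for JSON purposes.
	Debug bool `json:"-"`
}

func main() { _ = Event{} }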
+// If multiple fields all have the same JSON name, then the one +// at shallowest depth takes precedence and the other fields at deeper depths +// are excluded from the list of JSON representable fields. +// If multiple fields at the shallowest depth have the same JSON name, +// then all of those fields are excluded from the list. This is analogous to +// Go visibility rules for struct field selection with embedded struct types. +// +// Marshaling or unmarshaling a non-empty struct +// without any JSON representable fields results in a SemanticError. +// Unexported fields must not have any `json` tags except for `json:"-"`. +package json + +// requireKeyedLiterals can be embedded in a struct to require keyed literals. +type requireKeyedLiterals struct{} + +// nonComparable can be embedded in a struct to prevent comparability. +type nonComparable [0]func() diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go new file mode 100644 index 000000000..5b81ca15a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -0,0 +1,1170 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "io" + "math" + "math/bits" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// EncodeOptions configures how JSON encoding operates. +// The zero value is equivalent to the default settings, +// which is compliant with both RFC 7493 and RFC 8259. +type EncodeOptions struct { + requireKeyedLiterals + nonComparable + + // multiline specifies whether the encoder should emit multiline output. + multiline bool + + // omitTopLevelNewline specifies whether to omit the newline + // that is appended after every top-level JSON value when streaming. + omitTopLevelNewline bool + + // AllowDuplicateNames specifies that JSON objects may contain + // duplicate member names. Disabling the duplicate name check may provide + // performance benefits, but breaks compliance with RFC 7493, section 2.3. + // The output will still be compliant with RFC 8259, + // which leaves the handling of duplicate names as unspecified behavior. + AllowDuplicateNames bool + + // AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8, + // which will be mangled as the Unicode replacement character, U+FFFD. + // This causes the encoder to break compliance with + // RFC 7493, section 2.1, and RFC 8259, section 8.1. + AllowInvalidUTF8 bool + + // preserveRawStrings specifies that WriteToken and WriteValue should not + // reformat any JSON string, but keep the formatting verbatim. + preserveRawStrings bool + + // canonicalizeNumbers specifies that WriteToken and WriteValue should + // reformat any JSON numbers according to RFC 8785, section 3.2.2.3. + canonicalizeNumbers bool + + // EscapeRune reports whether the provided character should be escaped + // as a hexadecimal Unicode codepoint (e.g., \ufffd). + // If nil, the shortest and simplest encoding will be used, + // which is also the formatting specified by RFC 8785, section 3.2.2.2. 
+ EscapeRune func(rune) bool + + // Indent (if non-empty) specifies that the encoder should emit multiline + // output where each element in a JSON object or array begins on a new, + // indented line beginning with the indent prefix followed by one or more + // copies of indent according to the indentation nesting. + // It may only be composed of space or tab characters. + Indent string + + // IndentPrefix is prepended to each line within a JSON object or array. + // The purpose of the indent prefix is to encode data that can more easily + // be embedded inside other formatted JSON data. + // It may only be composed of space or tab characters. + // It is ignored if Indent is empty. + IndentPrefix string +} + +// Encoder is a streaming encoder from raw JSON tokens and values. +// It is used to write a stream of top-level JSON values, +// each terminated with a newline character. +// +// WriteToken and WriteValue calls may be interleaved. +// For example, the following JSON value: +// +// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}} +// +// can be composed with the following calls (ignoring errors for brevity): +// +// e.WriteToken(ObjectStart) // { +// e.WriteToken(String("name")) // "name" +// e.WriteToken(String("value")) // "value" +// e.WriteValue(RawValue(`"array"`)) // "array" +// e.WriteToken(ArrayStart) // [ +// e.WriteToken(Null) // null +// e.WriteToken(False) // false +// e.WriteValue(RawValue("true")) // true +// e.WriteToken(Float(3.14159)) // 3.14159 +// e.WriteToken(ArrayEnd) // ] +// e.WriteValue(RawValue(`"object"`)) // "object" +// e.WriteValue(RawValue(`{"k":"v"}`)) // {"k":"v"} +// e.WriteToken(ObjectEnd) // } +// +// The above is one of many possible sequence of calls and +// may not represent the most sensible method to call for any given token/value. +// For example, it is probably more common to call WriteToken with a string +// for object names. +type Encoder struct { + state + encodeBuffer + options EncodeOptions + + seenPointers seenPointers // only used when marshaling +} + +// encodeBuffer is a buffer split into 2 segments: +// +// - buf[0:len(buf)] // written (but unflushed) portion of the buffer +// - buf[len(buf):cap(buf)] // unused portion of the buffer +type encodeBuffer struct { + buf []byte // may alias wr if it is a bytes.Buffer + + // baseOffset is added to len(buf) to obtain the absolute offset + // relative to the start of io.Writer stream. + baseOffset int64 + + wr io.Writer + + // maxValue is the approximate maximum RawValue size passed to WriteValue. + maxValue int + // unusedCache is the buffer returned by the UnusedBuffer method. + unusedCache []byte + // bufStats is statistics about buffer utilization. + // It is only used with pooled encoders in pools.go. + bufStats bufferStatistics +} + +// NewEncoder constructs a new streaming encoder writing to w. +func NewEncoder(w io.Writer) *Encoder { + return EncodeOptions{}.NewEncoder(w) +} + +// NewEncoder constructs a new streaming encoder writing to w +// configured with the provided options. +// It flushes the internal buffer when the buffer is sufficiently full or +// when a top-level value has been written. +// +// If w is a bytes.Buffer, then the encoder appends directly into the buffer +// without copying the contents from an intermediate buffer. 
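Assuming the upstream module path github.com/go-json-experiment/json (the copy added by this patch is vendored under an internal kube-openapi path and cannot be imported directly), a short sketch of the indentation options and the token/value interleaving described above; errors are ignored for brevity, as in the doc comment.

package main

import (
	"bytes"
	"fmt"

	json "github.com/go-json-experiment/json" // assumed upstream path
)

func main() {
	var buf bytes.Buffer
	enc := json.EncodeOptions{Indent: "  "}.NewEncoder(&buf)

	// Tokens and raw values may be interleaved.
	_ = enc.WriteToken(json.ObjectStart)
	_ = enc.WriteToken(json.String("name"))
	_ = enc.WriteValue(json.RawValue(`"value"`))
	_ = enc.WriteToken(json.ObjectEnd)

	fmt.Print(buf.String())
	// Expected output, approximately:
	// {
	//   "name": "value"
	// }
}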
+func (o EncodeOptions) NewEncoder(w io.Writer) *Encoder { + e := new(Encoder) + o.ResetEncoder(e, w) + return e +} + +// ResetEncoder resets an encoder such that it is writing afresh to w and +// configured with the provided options. +func (o EncodeOptions) ResetEncoder(e *Encoder, w io.Writer) { + if e == nil { + panic("json: invalid nil Encoder") + } + if w == nil { + panic("json: invalid nil io.Writer") + } + e.reset(nil, w, o) +} + +func (e *Encoder) reset(b []byte, w io.Writer, o EncodeOptions) { + if len(o.Indent) > 0 { + o.multiline = true + if s := trimLeftSpaceTab(o.IndentPrefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(o.Indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + } + e.state.reset() + e.encodeBuffer = encodeBuffer{buf: b, wr: w, bufStats: e.bufStats} + e.options = o + if bb, ok := w.(*bytes.Buffer); ok && bb != nil { + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + } +} + +// Reset resets an encoder such that it is writing afresh to w but +// keeps any pre-existing encoder options. +func (e *Encoder) Reset(w io.Writer) { + e.options.ResetEncoder(e, w) +} + +// needFlush determines whether to flush at this point. +func (e *Encoder) needFlush() bool { + // NOTE: This function is carefully written to be inlineable. + + // Avoid flushing if e.wr is nil since there is no underlying writer. + // Flush if less than 25% of the capacity remains. + // Flushing at some constant fraction ensures that the buffer stops growing + // so long as the largest Token or Value fits within that unused capacity. + return e.wr != nil && (e.tokens.depth() == 1 || len(e.buf) > 3*cap(e.buf)/4) +} + +// flush flushes the buffer to the underlying io.Writer. +// It may append a trailing newline after the top-level value. +func (e *Encoder) flush() error { + if e.wr == nil || e.avoidFlush() { + return nil + } + + // In streaming mode, always emit a newline after the top-level value. + if e.tokens.depth() == 1 && !e.options.omitTopLevelNewline { + e.buf = append(e.buf, '\n') + } + + // Inform objectNameStack that we are about to flush the buffer content. + e.names.copyQuotedBuffer(e.buf) + + // Specialize bytes.Buffer for better performance. + if bb, ok := e.wr.(*bytes.Buffer); ok { + // If e.buf already aliases the internal buffer of bb, + // then the Write call simply increments the internal offset, + // otherwise Write operates as expected. + // See https://go.dev/issue/42986. + n, _ := bb.Write(e.buf) // never fails unless bb is nil + e.baseOffset += int64(n) + + // If the internal buffer of bytes.Buffer is too small, + // append operations elsewhere in the Encoder may grow the buffer. + // This would be semantically correct, but hurts performance. + // As such, ensure 25% of the current length is always available + // to reduce the probability that other appends must allocate. + if avail := bb.Cap() - bb.Len(); avail < bb.Len()/4 { + bb.Grow(avail + 1) + } + + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + return nil + } + + // Flush the internal buffer to the underlying io.Writer. + n, err := e.wr.Write(e.buf) + e.baseOffset += int64(n) + if err != nil { + // In the event of an error, preserve the unflushed portion. + // Thus, write errors aren't fatal so long as the io.Writer + // maintains consistent state after errors. 
+ if n > 0 { + e.buf = e.buf[:copy(e.buf, e.buf[n:])] + } + return &ioError{action: "write", err: err} + } + e.buf = e.buf[:0] + + // Check whether to grow the buffer. + // Note that cap(e.buf) may already exceed maxBufferSize since + // an append elsewhere already grew it to store a large token. + const maxBufferSize = 4 << 10 + const growthSizeFactor = 2 // higher value is faster + const growthRateFactor = 2 // higher value is slower + // By default, grow if below the maximum buffer size. + grow := cap(e.buf) <= maxBufferSize/growthSizeFactor + // Growing can be expensive, so only grow + // if a sufficient number of bytes have been processed. + grow = grow && int64(cap(e.buf)) < e.previousOffsetEnd()/growthRateFactor + if grow { + e.buf = make([]byte, 0, cap(e.buf)*growthSizeFactor) + } + + return nil +} + +func (e *encodeBuffer) previousOffsetEnd() int64 { return e.baseOffset + int64(len(e.buf)) } +func (e *encodeBuffer) unflushedBuffer() []byte { return e.buf } + +// avoidFlush indicates whether to avoid flushing to ensure there is always +// enough in the buffer to unwrite the last object member if it were empty. +func (e *Encoder) avoidFlush() bool { + switch { + case e.tokens.last.length() == 0: + // Never flush after ObjectStart or ArrayStart since we don't know yet + // if the object or array will end up being empty. + return true + case e.tokens.last.needObjectValue(): + // Never flush before the object value since we don't know yet + // if the object value will end up being empty. + return true + case e.tokens.last.needObjectName() && len(e.buf) >= 2: + // Never flush after the object value if it does turn out to be empty. + switch string(e.buf[len(e.buf)-2:]) { + case `ll`, `""`, `{}`, `[]`: // last two bytes of every empty value + return true + } + } + return false +} + +// unwriteEmptyObjectMember unwrites the last object member if it is empty +// and reports whether it performed an unwrite operation. +func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { + if last := e.tokens.last; !last.isObject() || !last.needObjectName() || last.length() == 0 { + panic("BUG: must be called on an object after writing a value") + } + + // The flushing logic is modified to never flush a trailing empty value. + // The encoder never writes trailing whitespace eagerly. + b := e.unflushedBuffer() + + // Detect whether the last value was empty. + var n int + if len(b) >= 3 { + switch string(b[len(b)-2:]) { + case "ll": // last two bytes of `null` + n = len(`null`) + case `""`: + // It is possible for a non-empty string to have `""` as a suffix + // if the second to the last quote was escaped. + if b[len(b)-3] == '\\' { + return false // e.g., `"\""` is not empty + } + n = len(`""`) + case `{}`: + n = len(`{}`) + case `[]`: + n = len(`[]`) + } + } + if n == 0 { + return false + } + + // Unwrite the value, whitespace, colon, name, whitespace, and comma. + b = b[:len(b)-n] + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ':') + b = trimSuffixString(b) + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ',') + e.buf = b // store back truncated unflushed buffer + + // Undo state changes. 
+ e.tokens.last.decrement() // for object member value + e.tokens.last.decrement() // for object member name + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + if prevName != nil { + e.names.copyQuotedBuffer(e.buf) // required by objectNameStack.replaceLastUnquotedName + e.names.replaceLastUnquotedName(*prevName) + } + } + return true +} + +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. + b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + +func trimSuffixWhitespace(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + n := len(b) - 1 + for n >= 0 && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n-- + } + return b[:n+1] +} + +func trimSuffixString(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + for len(b) >= 2 && !(b[len(b)-1] == '"' && b[len(b)-2] != '\\') { + b = b[:len(b)-1] // trim all characters except an unescaped quote + } + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + return b +} + +func hasSuffixByte(b []byte, c byte) bool { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + return len(b) > 0 && b[len(b)-1] == c +} + +func trimSuffixByte(b []byte, c byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == c { + return b[:len(b)-1] + } + return b +} + +// WriteToken writes the next token and advances the internal write offset. +// +// The provided token kind must be consistent with the JSON grammar. +// For example, it is an error to provide a number when the encoder +// is expecting an object name (which is always a string), or +// to provide an end object delimiter when the encoder is finishing an array. +// If the provided token is invalid, then it reports a SyntacticError and +// the internal state remains unchanged. +func (e *Encoder) WriteToken(t Token) error { + k := t.Kind() + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, k) + if e.options.multiline { + b = e.appendWhitespace(b, k) + } + + // Append the token to the output and to the state machine. + var err error + switch k { + case 'n': + b = append(b, "null"...) + err = e.tokens.appendLiteral() + case 'f': + b = append(b, "false"...) + err = e.tokens.appendLiteral() + case 't': + b = append(b, "true"...) 
+ err = e.tokens.appendLiteral() + case '"': + n0 := len(b) // offset before calling t.appendString + if b, err = t.appendString(b, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune); err != nil { + break + } + if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + break + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + err = e.tokens.appendString() + case '0': + if b, err = t.appendNumber(b, e.options.canonicalizeNumbers); err != nil { + break + } + err = e.tokens.appendNumber() + case '{': + b = append(b, '{') + if err = e.tokens.pushObject(); err != nil { + break + } + if !e.options.AllowDuplicateNames { + e.names.push() + e.namespaces.push() + } + case '}': + b = append(b, '}') + if err = e.tokens.popObject(); err != nil { + break + } + if !e.options.AllowDuplicateNames { + e.names.pop() + e.namespaces.pop() + } + case '[': + b = append(b, '[') + err = e.tokens.pushArray() + case ']': + b = append(b, ']') + err = e.tokens.popArray() + default: + return &SyntacticError{str: "invalid json.Token"} + } + if err != nil { + return err + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +const ( + rawIntNumber = -1 + rawUintNumber = -2 +) + +// writeNumber is specialized version of WriteToken, but optimized for numbers. +// As a special-case, if bits is -1 or -2, it will treat v as +// the raw-encoded bits of an int64 or uint64, respectively. +// It is only called from arshal_default.go. +func (e *Encoder) writeNumber(v float64, bits int, quote bool) error { + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, '0') + if e.options.multiline { + b = e.appendWhitespace(b, '0') + } + + if quote { + // Append the value to the output. + n0 := len(b) // offset before appending the number + b = append(b, '"') + switch bits { + case rawIntNumber: + b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10) + case rawUintNumber: + b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10) + default: + b = appendNumber(b, v, bits) + } + b = append(b, '"') + + // Escape the string if necessary. + if e.options.EscapeRune != nil { + b2 := append(e.unusedCache, b[n0+len(`"`):len(b)-len(`"`)]...) + b, _ = appendString(b[:n0], string(b2), false, e.options.EscapeRune) + e.unusedCache = b2[:0] + } + + // Update the state machine. 
+ if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + return errInvalidNamespace + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + return &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + if err := e.tokens.appendString(); err != nil { + return err + } + } else { + switch bits { + case rawIntNumber: + b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10) + case rawUintNumber: + b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10) + default: + b = appendNumber(b, v, bits) + } + if err := e.tokens.appendNumber(); err != nil { + return err + } + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +// WriteValue writes the next raw value and advances the internal write offset. +// The Encoder does not simply copy the provided value verbatim, but +// parses it to ensure that it is syntactically valid and reformats it +// according to how the Encoder is configured to format whitespace and strings. +// +// The provided value kind must be consistent with the JSON grammar +// (see examples on Encoder.WriteToken). If the provided value is invalid, +// then it reports a SyntacticError and the internal state remains unchanged. +func (e *Encoder) WriteValue(v RawValue) error { + e.maxValue |= len(v) // bitwise OR is a fast approximation of max + + k := v.Kind() + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, k) + if e.options.multiline { + b = e.appendWhitespace(b, k) + } + + // Append the value the output. + var err error + v = v[consumeWhitespace(v):] + n0 := len(b) // offset before calling e.reformatValue + b, v, err = e.reformatValue(b, v, e.tokens.depth()) + if err != nil { + return err + } + v = v[consumeWhitespace(v):] + if len(v) > 0 { + return newInvalidCharacterError(v[0:], "after top-level value") + } + + // Append the kind to the state machine. + switch k { + case 'n', 'f', 't': + err = e.tokens.appendLiteral() + case '"': + if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + break + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + err = e.tokens.appendString() + case '0': + err = e.tokens.appendNumber() + case '{': + if err = e.tokens.pushObject(); err != nil { + break + } + if err = e.tokens.popObject(); err != nil { + panic("BUG: popObject should never fail immediately after pushObject: " + err.Error()) + } + case '[': + if err = e.tokens.pushArray(); err != nil { + break + } + if err = e.tokens.popArray(); err != nil { + panic("BUG: popArray should never fail immediately after pushArray: " + err.Error()) + } + } + if err != nil { + return err + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +// appendWhitespace appends whitespace that immediately precedes the next token. 
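A hedged sketch of WriteValue's behavior as described above, reformatting whitespace and rejecting duplicate object names under the default options; the import path is the assumed upstream one rather than the internal vendored copy, and the commented results are expectations, not captured output.

package main

import (
	"bytes"
	"fmt"

	json "github.com/go-json-experiment/json" // assumed upstream path
)

func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	// WriteValue re-parses the raw value and normalizes its whitespace.
	if err := enc.WriteValue(json.RawValue(` { "a" : [ 1 , 2 ] } `)); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Printf("%q\n", buf.String()) // expected roughly: "{\"a\":[1,2]}\n"

	// With AllowDuplicateNames left false, duplicate object names are rejected.
	err := enc.WriteValue(json.RawValue(`{"k":1,"k":2}`))
	fmt.Println(err != nil) // expected: true (a SyntacticError about a duplicate name)
}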
+func (e *Encoder) appendWhitespace(b []byte, next Kind) []byte { + if e.tokens.needDelim(next) == ':' { + return append(b, ' ') + } else { + return e.appendIndent(b, e.tokens.needIndent(next)) + } +} + +// appendIndent appends the appropriate number of indentation characters +// for the current nested level, n. +func (e *Encoder) appendIndent(b []byte, n int) []byte { + if n == 0 { + return b + } + b = append(b, '\n') + b = append(b, e.options.IndentPrefix...) + for ; n > 1; n-- { + b = append(b, e.options.Indent...) + } + return b +} + +// reformatValue parses a JSON value from the start of src and +// appends it to the end of dst, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatValue(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // TODO: Should this update valueFlags as input? + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + var n int + var err error + switch k := Kind(src[0]).normalize(); k { + case 'n': + if n = consumeNull(src); n == 0 { + n, err = consumeLiteral(src, "null") + } + case 'f': + if n = consumeFalse(src); n == 0 { + n, err = consumeLiteral(src, "false") + } + case 't': + if n = consumeTrue(src); n == 0 { + n, err = consumeLiteral(src, "true") + } + case '"': + if n := consumeSimpleString(src); n > 0 && e.options.EscapeRune == nil { + dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim + return dst, src, nil + } + return reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune) + case '0': + if n := consumeSimpleNumber(src); n > 0 && !e.options.canonicalizeNumbers { + dst, src = append(dst, src[:n]...), src[n:] // copy simple numbers verbatim + return dst, src, nil + } + return reformatNumber(dst, src, e.options.canonicalizeNumbers) + case '{': + return e.reformatObject(dst, src, depth) + case '[': + return e.reformatArray(dst, src, depth) + default: + return dst, src, newInvalidCharacterError(src, "at start of value") + } + if err != nil { + return dst, src, err + } + dst, src = append(dst, src[:n]...), src[n:] + return dst, src, nil +} + +// reformatObject parses a JSON object from the start of src and +// appends it to the end of src, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatObject(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // Append object start. + if src[0] != '{' { + panic("BUG: reformatObject must be called with a buffer that starts with '{'") + } + dst, src = append(dst, '{'), src[1:] + + // Append (possible) object end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] == '}' { + dst, src = append(dst, '}'), src[1:] + return dst, src, nil + } + + var err error + var names *objectNamespace + if !e.options.AllowDuplicateNames { + e.namespaces.push() + defer e.namespaces.pop() + names = e.namespaces.last() + } + depth++ + for { + // Append optional newline and indentation. + if e.options.multiline { + dst = e.appendIndent(dst, depth) + } + + // Append object name. 
+ src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + n0 := len(dst) // offset before calling reformatString + n := consumeSimpleString(src) + if n > 0 && e.options.EscapeRune == nil { + dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim + } else { + dst, src, err = reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune) + } + if err != nil { + return dst, src, err + } + if !e.options.AllowDuplicateNames && !names.insertQuoted(dst[n0:], false) { + return dst, src, &SyntacticError{str: "duplicate name " + string(dst[n0:]) + " in object"} + } + + // Append colon. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] != ':' { + return dst, src, newInvalidCharacterError(src, "after object name (expecting ':')") + } + dst, src = append(dst, ':'), src[1:] + if e.options.multiline { + dst = append(dst, ' ') + } + + // Append object value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or object end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case '}': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, '}'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after object value (expecting ',' or '}')") + } + } +} + +// reformatArray parses a JSON array from the start of src and +// appends it to the end of dst, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatArray(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // Append array start. + if src[0] != '[' { + panic("BUG: reformatArray must be called with a buffer that starts with '['") + } + dst, src = append(dst, '['), src[1:] + + // Append (possible) array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] == ']' { + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + } + + var err error + depth++ + for { + // Append optional newline and indentation. + if e.options.multiline { + dst = e.appendIndent(dst, depth) + } + + // Append array value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case ']': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after array value (expecting ',' or ']')") + } + } +} + +// OutputOffset returns the current output byte offset. It gives the location +// of the next byte immediately after the most recently written token or value. +// The number of bytes actually written to the underlying io.Writer may be less +// than this offset due to internal buffering effects. 
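reformatValue, reformatObject, and reformatArray re-emit an already-validated value using the encoder's own whitespace policy. The standard library's encoding/json package exposes the same idea through Compact and Indent, which makes the intended effect easy to see:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte("{ \"a\" : [ 1 , 2 ] }")

	var compact, indented bytes.Buffer
	// Compact strips insignificant whitespace, as reformat* does
	// when multiline output is disabled.
	if err := json.Compact(&compact, src); err != nil {
		panic(err)
	}
	// Indent rewrites the value with a newline and indent per level,
	// as reformat* does when multiline output is enabled.
	if err := json.Indent(&indented, src, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(compact.String())
	fmt.Println(indented.String())
}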
+func (e *Encoder) OutputOffset() int64 { + return e.previousOffsetEnd() +} + +// UnusedBuffer returns a zero-length buffer with a possible non-zero capacity. +// This buffer is intended to be used to populate a RawValue +// being passed to an immediately succeeding WriteValue call. +// +// Example usage: +// +// b := d.UnusedBuffer() +// b = append(b, '"') +// b = appendString(b, v) // append the string formatting of v +// b = append(b, '"') +// ... := d.WriteValue(b) +// +// It is the user's responsibility to ensure that the value is valid JSON. +func (e *Encoder) UnusedBuffer() []byte { + // NOTE: We don't return e.buf[len(e.buf):cap(e.buf)] since WriteValue would + // need to take special care to avoid mangling the data while reformatting. + // WriteValue can't easily identify whether the input RawValue aliases e.buf + // without using unsafe.Pointer. Thus, we just return a different buffer. + // Should this ever alias e.buf, we need to consider how it operates with + // the specialized performance optimization for bytes.Buffer. + n := 1 << bits.Len(uint(e.maxValue|63)) // fast approximation for max length + if cap(e.unusedCache) < n { + e.unusedCache = make([]byte, 0, n) + } + return e.unusedCache +} + +// StackDepth returns the depth of the state machine for written JSON data. +// Each level on the stack represents a nested JSON object or array. +// It is incremented whenever an ObjectStart or ArrayStart token is encountered +// and decremented whenever an ObjectEnd or ArrayEnd token is encountered. +// The depth is zero-indexed, where zero represents the top-level JSON value. +func (e *Encoder) StackDepth() int { + // NOTE: Keep in sync with Decoder.StackDepth. + return e.tokens.depth() - 1 +} + +// StackIndex returns information about the specified stack level. +// It must be a number between 0 and StackDepth, inclusive. +// For each level, it reports the kind: +// +// - 0 for a level of zero, +// - '{' for a level representing a JSON object, and +// - '[' for a level representing a JSON array. +// +// It also reports the length of that JSON object or array. +// Each name and value in a JSON object is counted separately, +// so the effective number of members would be half the length. +// A complete JSON object must have an even length. +func (e *Encoder) StackIndex(i int) (Kind, int) { + // NOTE: Keep in sync with Decoder.StackIndex. + switch s := e.tokens.index(i); { + case i > 0 && s.isObject(): + return '{', s.length() + case i > 0 && s.isArray(): + return '[', s.length() + default: + return 0, s.length() + } +} + +// StackPointer returns a JSON Pointer (RFC 6901) to the most recently written value. +// Object names are only present if AllowDuplicateNames is false, otherwise +// object members are represented using their index within the object. +func (e *Encoder) StackPointer() string { + e.names.copyQuotedBuffer(e.buf) + return string(e.appendStackPointer(nil)) +} + +// appendString appends src to dst as a JSON string per RFC 7159, section 7. +// +// If validateUTF8 is specified, this rejects input that contains invalid UTF-8 +// otherwise invalid bytes are replaced with the Unicode replacement character. +// If escapeRune is provided, it specifies which runes to escape using +// hexadecimal sequences. If nil, the shortest representable form is used, +// which is also the canonical form for strings (RFC 8785, section 3.2.2.2). 
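UnusedBuffer sizes its scratch buffer by rounding the largest value written so far up to a power of two, with 64 bytes as the floor (the |63 term). The sizing arithmetic in isolation:

package main

import (
	"fmt"
	"math/bits"
)

// nextBufSize mirrors the sizing expression above:
// round up to a power of two, but never below 64 bytes.
func nextBufSize(maxSeen int) int {
	return 1 << bits.Len(uint(maxSeen|63))
}

func main() {
	for _, v := range []int{0, 10, 63, 64, 100, 1000} {
		fmt.Println(v, "->", nextBufSize(v))
	}
	// 0, 10, and 63 all map to 64; 64 and 100 map to 128; 1000 maps to 1024.
}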
+// +// Note that this API allows full control over the formatting of strings +// except for whether a forward solidus '/' may be formatted as '\/' and +// the casing of hexadecimal Unicode escape sequences. +func appendString(dst []byte, src string, validateUTF8 bool, escapeRune func(rune) bool) ([]byte, error) { + appendEscapedASCII := func(dst []byte, c byte) []byte { + switch c { + case '"', '\\': + dst = append(dst, '\\', c) + case '\b': + dst = append(dst, "\\b"...) + case '\f': + dst = append(dst, "\\f"...) + case '\n': + dst = append(dst, "\\n"...) + case '\r': + dst = append(dst, "\\r"...) + case '\t': + dst = append(dst, "\\t"...) + default: + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(c)) + } + return dst + } + appendEscapedUnicode := func(dst []byte, r rune) []byte { + if r1, r2 := utf16.EncodeRune(r); r1 != '\ufffd' && r2 != '\ufffd' { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r1)) + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r2)) + } else { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r)) + } + return dst + } + + // Optimize for when escapeRune is nil. + if escapeRune == nil { + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + // Handle single-byte ASCII. + if c := src[n]; c < utf8.RuneSelf { + n++ + if c < ' ' || c == '"' || c == '\\' { + dst = append(dst, src[i:n-1]...) + dst = appendEscapedASCII(dst, c) + i = n + } + continue + } + + // Handle multi-byte Unicode. + _, rn := utf8.DecodeRuneInString(src[n:]) + n += rn + if rn == 1 { // must be utf8.RuneError since we already checked for single-byte ASCII + dst = append(dst, src[i:n-rn]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + dst = append(dst, "\ufffd"...) + i = n + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil + } + + // Slower implementation for when escapeRune is non-nil. + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + switch r, rn := utf8.DecodeRuneInString(src[n:]); { + case r == utf8.RuneError && rn == 1: + dst = append(dst, src[i:n]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + if escapeRune('\ufffd') { + dst = append(dst, `\ufffd`...) + } else { + dst = append(dst, "\ufffd"...) + } + n += rn + i = n + case escapeRune(r): + dst = append(dst, src[i:n]...) + dst = appendEscapedUnicode(dst, r) + n += rn + i = n + case r < ' ' || r == '"' || r == '\\': + dst = append(dst, src[i:n]...) + dst = appendEscapedASCII(dst, byte(r)) + n += rn + i = n + default: + n += rn + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil +} + +// reformatString consumes a JSON string from src and appends it to dst, +// reformatting it if necessary for the given escapeRune parameter. +// It returns the appended output and the remainder of the input. +func reformatString(dst, src []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, []byte, error) { + // TODO: Should this update valueFlags as input? + var flags valueFlags + n, err := consumeString(&flags, src, validateUTF8) + if err != nil { + return dst, src[n:], err + } + if preserveRaw || (escapeRune == nil && flags.isCanonical()) { + dst = append(dst, src[:n]...) // copy the string verbatim + return dst, src[n:], nil + } + + // TODO: Implement a direct, raw-to-raw reformat for strings. 
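When escapeRune requests a hexadecimal escape, runes outside the Basic Multilingual Plane are written as a UTF-16 surrogate pair, as appendEscapedUnicode does above. A standalone sketch of that branch:

package main

import (
	"fmt"
	"unicode/utf16"
)

// escapeUnicode renders r as \uXXXX, or as a \uXXXX\uXXXX surrogate
// pair when r does not fit in a single UTF-16 code unit.
func escapeUnicode(r rune) string {
	if r1, r2 := utf16.EncodeRune(r); r1 != '\ufffd' && r2 != '\ufffd' {
		return fmt.Sprintf(`\u%04x\u%04x`, r1, r2)
	}
	return fmt.Sprintf(`\u%04x`, r)
}

func main() {
	fmt.Println(escapeUnicode('é'))      // \u00e9
	fmt.Println(escapeUnicode('\u2028')) // \u2028
	fmt.Println(escapeUnicode('😀'))     // \ud83d\ude00
}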
+ // If the escapeRune option would have resulted in no changes to the output, + // it would be faster to simply append src to dst without going through + // an intermediary representation in a separate buffer. + b, _ := unescapeString(make([]byte, 0, n), src[:n]) + dst, _ = appendString(dst, string(b), validateUTF8, escapeRune) + return dst, src[n:], nil +} + +// appendNumber appends src to dst as a JSON number per RFC 7159, section 6. +// It formats numbers similar to the ES6 number-to-string conversion. +// See https://go.dev/issue/14135. +// +// The output is identical to ECMA-262, 6th edition, section 7.1.12.1 and with +// RFC 8785, section 3.2.2.3 for 64-bit floating-point numbers except for -0, +// which is formatted as -0 instead of just 0. +// +// For 32-bit floating-point numbers, +// the output is a 32-bit equivalent of the algorithm. +// Note that ECMA-262 specifies no algorithm for 32-bit numbers. +func appendNumber(dst []byte, src float64, bits int) []byte { + if bits == 32 { + src = float64(float32(src)) + } + + abs := math.Abs(src) + fmt := byte('f') + if abs != 0 { + if bits == 64 && (float64(abs) < 1e-6 || float64(abs) >= 1e21) || + bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + dst = strconv.AppendFloat(dst, src, fmt, -1, bits) + if fmt == 'e' { + // Clean up e-09 to e-9. + n := len(dst) + if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { + dst[n-2] = dst[n-1] + dst = dst[:n-1] + } + } + return dst +} + +// reformatNumber consumes a JSON string from src and appends it to dst, +// canonicalizing it if specified. +// It returns the appended output and the remainder of the input. +func reformatNumber(dst, src []byte, canonicalize bool) ([]byte, []byte, error) { + n, err := consumeNumber(src) + if err != nil { + return dst, src[n:], err + } + if !canonicalize { + dst = append(dst, src[:n]...) // copy the number verbatim + return dst, src[n:], nil + } + + // Canonicalize the number per RFC 8785, section 3.2.2.3. + // As an optimization, we can copy integer numbers below 2⁵³ verbatim. + const maxExactIntegerDigits = 16 // len(strconv.AppendUint(nil, 1<<53, 10)) + if n < maxExactIntegerDigits && consumeSimpleNumber(src[:n]) == n { + dst = append(dst, src[:n]...) // copy the number verbatim + return dst, src[n:], nil + } + fv, _ := strconv.ParseFloat(string(src[:n]), 64) + switch { + case fv == 0: + fv = 0 // normalize negative zero as just zero + case math.IsInf(fv, +1): + fv = +math.MaxFloat64 + case math.IsInf(fv, -1): + fv = -math.MaxFloat64 + } + return appendNumber(dst, fv, 64), src[n:], nil +} + +// appendHexUint16 appends src to dst as a 4-byte hexadecimal number. +func appendHexUint16(dst []byte, src uint16) []byte { + dst = append(dst, "0000"[1+(bits.Len16(src)-1)/4:]...) + dst = strconv.AppendUint(dst, uint64(src), 16) + return dst +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go new file mode 100644 index 000000000..35be8601e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go @@ -0,0 +1,183 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
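appendNumber, shown above, picks plain decimal form for magnitudes in [1e-6, 1e21) and scientific notation otherwise, then trims exponents such as e-09 to e-9. A rough standalone equivalent built on strconv (without the exponent trimming):

package main

import (
	"fmt"
	"math"
	"strconv"
)

// formatFloat follows the rule described above: plain decimal form for
// magnitudes in [1e-6, 1e21), scientific notation otherwise, using the
// shortest representation that round-trips.
func formatFloat(v float64) string {
	format := byte('f')
	if abs := math.Abs(v); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		format = 'e'
	}
	return strconv.FormatFloat(v, format, -1, 64)
}

func main() {
	fmt.Println(formatFloat(0.000001))  // 0.000001
	fmt.Println(formatFloat(0.0000001)) // 1e-07 (the encoder additionally trims this to 1e-7)
	fmt.Println(formatFloat(1e21))      // 1e+21
	fmt.Println(formatFloat(123.456))   // 123.456
}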
+ +package json + +import ( + "errors" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +const errorPrefix = "json: " + +// Error matches errors returned by this package according to errors.Is. +const Error = jsonError("json error") + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} +func (e jsonError) Is(target error) bool { + return e == target || target == Error +} + +type ioError struct { + action string // either "read" or "write" + err error +} + +func (e *ioError) Error() string { + return errorPrefix + e.action + " error: " + e.err.Error() +} +func (e *ioError) Unwrap() error { + return e.err +} +func (e *ioError) Is(target error) bool { + return e == target || target == Error || errors.Is(e.err, target) +} + +// SemanticError describes an error determining the meaning +// of JSON data as Go data or vice-versa. +// +// The contents of this error as produced by this package may change over time. +type SemanticError struct { + requireKeyedLiterals + nonComparable + + action string // either "marshal" or "unmarshal" + + // ByteOffset indicates that an error occurred after this byte offset. + ByteOffset int64 + // JSONPointer indicates that an error occurred within this JSON value + // as indicated using the JSON Pointer notation (see RFC 6901). + JSONPointer string + + // JSONKind is the JSON kind that could not be handled. + JSONKind Kind // may be zero if unknown + // GoType is the Go type that could not be handled. + GoType reflect.Type // may be nil if unknown + + // Err is the underlying error. + Err error // may be nil +} + +func (e *SemanticError) Error() string { + var sb strings.Builder + sb.WriteString(errorPrefix) + + // Hyrum-proof the error message by deliberately switching between + // two equivalent renderings of the same error message. + // The randomization is tied to the Hyrum-proofing already applied + // on map iteration in Go. + for phrase := range map[string]struct{}{"cannot": {}, "unable to": {}} { + sb.WriteString(phrase) + break // use whichever phrase we get in the first iteration + } + + // Format action. + var preposition string + switch e.action { + case "marshal": + sb.WriteString(" marshal") + preposition = " from" + case "unmarshal": + sb.WriteString(" unmarshal") + preposition = " into" + default: + sb.WriteString(" handle") + preposition = " with" + } + + // Format JSON kind. + var omitPreposition bool + switch e.JSONKind { + case 'n': + sb.WriteString(" JSON null") + case 'f', 't': + sb.WriteString(" JSON boolean") + case '"': + sb.WriteString(" JSON string") + case '0': + sb.WriteString(" JSON number") + case '{', '}': + sb.WriteString(" JSON object") + case '[', ']': + sb.WriteString(" JSON array") + default: + omitPreposition = true + } + + // Format Go type. + if e.GoType != nil { + if !omitPreposition { + sb.WriteString(preposition) + } + sb.WriteString(" Go value of type ") + sb.WriteString(e.GoType.String()) + } + + // Format where. + switch { + case e.JSONPointer != "": + sb.WriteString(" within JSON value at ") + sb.WriteString(strconv.Quote(e.JSONPointer)) + case e.ByteOffset > 0: + sb.WriteString(" after byte offset ") + sb.WriteString(strconv.FormatInt(e.ByteOffset, 10)) + } + + // Format underlying error. 
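Each concrete error type implements Is so that the single Error sentinel matches any error from the package through errors.Is. The same pattern in a self-contained sketch with hypothetical type names:

package main

import (
	"errors"
	"fmt"
)

// Error is the package-wide sentinel; concrete errors match it via Is.
const Error = pkgError("json error")

type pkgError string

func (e pkgError) Error() string { return string(e) }
func (e pkgError) Is(target error) bool {
	return e == target || target == Error
}

type parseError struct{ msg string }

func (e *parseError) Error() string { return "json: " + e.msg }
func (e *parseError) Is(target error) bool {
	return e == target || target == Error
}

func main() {
	var err error = &parseError{msg: "unexpected EOF"}
	fmt.Println(errors.Is(err, Error)) // true
}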
+ if e.Err != nil { + sb.WriteString(": ") + sb.WriteString(e.Err.Error()) + } + + return sb.String() +} +func (e *SemanticError) Is(target error) bool { + return e == target || target == Error || errors.Is(e.Err, target) +} +func (e *SemanticError) Unwrap() error { + return e.Err +} + +// SyntacticError is a description of a syntactic error that occurred when +// encoding or decoding JSON according to the grammar. +// +// The contents of this error as produced by this package may change over time. +type SyntacticError struct { + requireKeyedLiterals + nonComparable + + // ByteOffset indicates that an error occurred after this byte offset. + ByteOffset int64 + str string +} + +func (e *SyntacticError) Error() string { + return errorPrefix + e.str +} +func (e *SyntacticError) Is(target error) bool { + return e == target || target == Error +} +func (e *SyntacticError) withOffset(pos int64) error { + return &SyntacticError{ByteOffset: pos, str: e.str} +} + +func newInvalidCharacterError(prefix []byte, where string) *SyntacticError { + what := quoteRune(prefix) + return &SyntacticError{str: "invalid character " + what + " " + where} +} + +func quoteRune(b []byte) string { + r, n := utf8.DecodeRune(b) + if r == utf8.RuneError && n == 1 { + return `'\x` + strconv.FormatUint(uint64(b[0]), 16) + `'` + } + return strconv.QuoteRune(r) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go new file mode 100644 index 000000000..c0ee36166 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go @@ -0,0 +1,509 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +var errIgnoredField = errors.New("ignored field") + +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() + +type structFields struct { + flattened []structField // listed in depth-first ordering + byActualName map[string]*structField + byFoldedName map[string][]*structField + inlinedFallback *structField +} + +type structField struct { + id int // unique numeric ID in breadth-first ordering + index []int // index into a struct according to reflect.Type.FieldByIndex + typ reflect.Type + fncs *arshaler + isZero func(addressableValue) bool + isEmpty func(addressableValue) bool + fieldOptions +} + +func makeStructFields(root reflect.Type) (structFields, *SemanticError) { + var fs structFields + fs.byActualName = make(map[string]*structField, root.NumField()) + fs.byFoldedName = make(map[string][]*structField, root.NumField()) + + // ambiguous is a sentinel value to indicate that at least two fields + // at the same depth have the same name, and thus cancel each other out. + // This follows the same rules as selecting a field on embedded structs + // where the shallowest field takes precedence. If more than one field + // exists at the shallowest depth, then the selection is illegal. + // See https://go.dev/ref/spec#Selectors. + ambiguous := new(structField) + + // Setup a queue for a breath-first search. 
+ var queueIndex int + type queueEntry struct { + typ reflect.Type + index []int + visitChildren bool // whether to recursively visit inlined field in this struct + } + queue := []queueEntry{{root, nil, true}} + seen := map[reflect.Type]bool{root: true} + + // Perform a breadth-first search over all reachable fields. + // This ensures that len(f.index) will be monotonically increasing. + for queueIndex < len(queue) { + qe := queue[queueIndex] + queueIndex++ + + t := qe.typ + inlinedFallbackIndex := -1 // index of last inlined fallback field in current struct + namesIndex := make(map[string]int) // index of each field with a given JSON object name in current struct + var hasAnyJSONTag bool // whether any Go struct field has a `json` tag + var hasAnyJSONField bool // whether any JSON serializable fields exist in current struct + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + _, hasTag := sf.Tag.Lookup("json") + hasAnyJSONTag = hasAnyJSONTag || hasTag + options, err := parseFieldOptions(sf) + if err != nil { + if err == errIgnoredField { + continue + } + return structFields{}, &SemanticError{GoType: t, Err: err} + } + hasAnyJSONField = true + f := structField{ + // Allocate a new slice (len=N+1) to hold both + // the parent index (len=N) and the current index (len=1). + // Do this to avoid clobbering the memory of the parent index. + index: append(append(make([]int, 0, len(qe.index)+1), qe.index...), i), + typ: sf.Type, + fieldOptions: options, + } + if sf.Anonymous && !f.hasName { + f.inline = true // implied by use of Go embedding without an explicit name + } + if f.inline || f.unknown { + // Handle an inlined field that serializes to/from + // zero or more JSON object members. + + if f.inline && f.unknown { + err := fmt.Errorf("Go struct field %s cannot have both `inline` and `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + switch f.fieldOptions { + case fieldOptions{name: f.name, quotedName: f.quotedName, inline: true}: + case fieldOptions{name: f.name, quotedName: f.quotedName, unknown: true}: + default: + err := fmt.Errorf("Go struct field %s cannot have any options other than `inline` or `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Unwrap one level of pointer indirection similar to how Go + // only allows embedding either T or *T, but not **T. + tf := f.typ + if tf.Kind() == reflect.Pointer && tf.Name() == "" { + tf = tf.Elem() + } + // Reject any types with custom serialization otherwise + // it becomes impossible to know what sub-fields to inline. + if which, _ := implementsWhich(tf, + jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType, + jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType, + ); which != nil && tf != rawValueType { + err := fmt.Errorf("inlined Go struct field %s of type %s must not implement JSON marshal or unmarshal methods", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Handle an inlined field that serializes to/from + // a finite number of JSON object members backed by a Go struct. 
+ if tf.Kind() == reflect.Struct { + if f.unknown { + err := fmt.Errorf("inlined Go struct field %s of type %s with `unknown` tag must be a Go map of string key or a json.RawValue", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + if qe.visitChildren { + queue = append(queue, queueEntry{tf, f.index, !seen[tf]}) + } + seen[tf] = true + continue + } + + // Handle an inlined field that serializes to/from any number of + // JSON object members back by a Go map or RawValue. + switch { + case tf == rawValueType: + f.fncs = nil // specially handled in arshal_inlined.go + case tf.Kind() == reflect.Map && tf.Key() == stringType: + f.fncs = lookupArshaler(tf.Elem()) + default: + err := fmt.Errorf("inlined Go struct field %s of type %s must be a Go struct, Go map of string key, or json.RawValue", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Reject multiple inlined fallback fields within the same struct. + if inlinedFallbackIndex >= 0 { + err := fmt.Errorf("inlined Go struct fields %s and %s cannot both be a Go map or json.RawValue", t.Field(inlinedFallbackIndex).Name, sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + inlinedFallbackIndex = i + + // Multiple inlined fallback fields across different structs + // follow the same precedence rules as Go struct embedding. + if fs.inlinedFallback == nil { + fs.inlinedFallback = &f // store first occurrence at lowest depth + } else if len(fs.inlinedFallback.index) == len(f.index) { + fs.inlinedFallback = ambiguous // at least two occurrences at same depth + } + } else { + // Handle normal Go struct field that serializes to/from + // a single JSON object member. + + // Provide a function that uses a type's IsZero method. + switch { + case sf.Type.Kind() == reflect.Interface && sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return va.IsNil() || (va.Elem().Kind() == reflect.Pointer && va.Elem().IsNil()) || va.Interface().(isZeroer).IsZero() + } + case sf.Type.Kind() == reflect.Pointer && sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { + // Avoid panics calling IsZero on nil pointer. + return va.IsNil() || va.Interface().(isZeroer).IsZero() + } + case sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { return va.Interface().(isZeroer).IsZero() } + case reflect.PointerTo(sf.Type).Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { return va.Addr().Interface().(isZeroer).IsZero() } + } + + // Provide a function that can determine whether the value would + // serialize as an empty JSON value. + switch sf.Type.Kind() { + case reflect.String, reflect.Map, reflect.Array, reflect.Slice: + f.isEmpty = func(va addressableValue) bool { return va.Len() == 0 } + case reflect.Pointer, reflect.Interface: + f.isEmpty = func(va addressableValue) bool { return va.IsNil() } + } + + f.id = len(fs.flattened) + f.fncs = lookupArshaler(sf.Type) + fs.flattened = append(fs.flattened, f) + + // Reject user-specified names with invalid UTF-8. + if !utf8.ValidString(f.name) { + err := fmt.Errorf("Go struct field %s has JSON object name %q with invalid UTF-8", sf.Name, f.name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + // Reject multiple fields with same name within the same struct. 
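For omitzero, the field's check prefers the type's own IsZero method when one exists, detected through reflection against the isZeroer interface. A reduced sketch of that detection, using time.Time as the example type:

package main

import (
	"fmt"
	"reflect"
	"time"
)

type isZeroer interface{ IsZero() bool }

var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()

// zeroCheck returns a function reporting whether a value of type t
// should be treated as zero, preferring t's own IsZero method.
func zeroCheck(t reflect.Type) func(reflect.Value) bool {
	switch {
	case t.Implements(isZeroerType):
		return func(v reflect.Value) bool { return v.Interface().(isZeroer).IsZero() }
	case reflect.PointerTo(t).Implements(isZeroerType):
		return func(v reflect.Value) bool {
			p := reflect.New(t) // obtain an addressable copy so *T's method is callable
			p.Elem().Set(v)
			return p.Interface().(isZeroer).IsZero()
		}
	default:
		return func(v reflect.Value) bool { return v.IsZero() }
	}
}

func main() {
	check := zeroCheck(reflect.TypeOf(time.Time{}))
	fmt.Println(check(reflect.ValueOf(time.Time{}))) // true: uses time.Time.IsZero
	fmt.Println(check(reflect.ValueOf(time.Now())))  // false
}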
+ if j, ok := namesIndex[f.name]; ok { + err := fmt.Errorf("Go struct fields %s and %s conflict over JSON object name %q", t.Field(j).Name, sf.Name, f.name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + namesIndex[f.name] = i + + // Multiple fields of the same name across different structs + // follow the same precedence rules as Go struct embedding. + if f2 := fs.byActualName[f.name]; f2 == nil { + fs.byActualName[f.name] = &fs.flattened[len(fs.flattened)-1] // store first occurrence at lowest depth + } else if len(f2.index) == len(f.index) { + fs.byActualName[f.name] = ambiguous // at least two occurrences at same depth + } + } + } + + // NOTE: New users to the json package are occasionally surprised that + // unexported fields are ignored. This occurs by necessity due to our + // inability to directly introspect such fields with Go reflection + // without the use of unsafe. + // + // To reduce friction here, refuse to serialize any Go struct that + // has no JSON serializable fields, has at least one Go struct field, + // and does not have any `json` tags present. For example, + // errors returned by errors.New would fail to serialize. + isEmptyStruct := t.NumField() == 0 + if !isEmptyStruct && !hasAnyJSONTag && !hasAnyJSONField { + err := errors.New("Go struct has no exported fields") + return structFields{}, &SemanticError{GoType: t, Err: err} + } + } + + // Remove all fields that are duplicates. + // This may move elements forward to fill the holes from removed fields. + var n int + for _, f := range fs.flattened { + switch f2 := fs.byActualName[f.name]; { + case f2 == ambiguous: + delete(fs.byActualName, f.name) + case f2 == nil: + continue // may be nil due to previous delete + // TODO(https://go.dev/issue/45955): Use slices.Equal. + case reflect.DeepEqual(f.index, f2.index): + f.id = n + fs.flattened[n] = f + fs.byActualName[f.name] = &fs.flattened[n] // fix pointer to new location + n++ + } + } + fs.flattened = fs.flattened[:n] + if fs.inlinedFallback == ambiguous { + fs.inlinedFallback = nil + } + if len(fs.flattened) != len(fs.byActualName) { + panic(fmt.Sprintf("BUG: flattened list of fields mismatches fields mapped by name: %d != %d", len(fs.flattened), len(fs.byActualName))) + } + + // Sort the fields according to a depth-first ordering. + // This operation will cause pointers in byActualName to become incorrect, + // which we will correct in another loop shortly thereafter. + sort.Slice(fs.flattened, func(i, j int) bool { + si := fs.flattened[i].index + sj := fs.flattened[j].index + for len(si) > 0 && len(sj) > 0 { + switch { + case si[0] < sj[0]: + return true + case si[0] > sj[0]: + return false + default: + si = si[1:] + sj = sj[1:] + } + } + return len(si) < len(sj) + }) + + // Recompute the mapping of fields in the byActualName map. + // Pre-fold all names so that we can lookup folded names quickly. + for i, f := range fs.flattened { + foldedName := string(foldName([]byte(f.name))) + fs.byActualName[f.name] = &fs.flattened[i] + fs.byFoldedName[foldedName] = append(fs.byFoldedName[foldedName], &fs.flattened[i]) + } + for foldedName, fields := range fs.byFoldedName { + if len(fields) > 1 { + // The precedence order for conflicting nocase names + // is by breadth-first order, rather than depth-first order. 
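The ambiguity handling mirrors Go selector resolution, and encoding/json v1 already behaves the same way: two fields that claim the same JSON name at the same embedding depth cancel each other out rather than one winning arbitrarily. For example:

package main

import (
	"encoding/json"
	"fmt"
)

type A struct {
	ID string `json:"id"`
}
type B struct {
	ID string `json:"id"`
}

// Both embedded structs provide "id" at the same depth, so that field
// is ambiguous and omitted; the outer struct's own "name" survives.
type Doc struct {
	A
	B
	Name string `json:"name"`
}

func main() {
	out, _ := json.Marshal(Doc{A: A{ID: "a"}, B: B{ID: "b"}, Name: "x"})
	fmt.Println(string(out)) // {"name":"x"}
}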
+ sort.Slice(fields, func(i, j int) bool { + return fields[i].id < fields[j].id + }) + fs.byFoldedName[foldedName] = fields + } + } + + return fs, nil +} + +type fieldOptions struct { + name string + quotedName string // quoted name per RFC 8785, section 3.2.2.2. + hasName bool + nocase bool + inline bool + unknown bool + omitzero bool + omitempty bool + string bool + format string +} + +// parseFieldOptions parses the `json` tag in a Go struct field as +// a structured set of options configuring parameters such as +// the JSON member name and other features. +// As a special case, it returns errIgnoredField if the field is ignored. +func parseFieldOptions(sf reflect.StructField) (out fieldOptions, err error) { + tag, hasTag := sf.Tag.Lookup("json") + + // Check whether this field is explicitly ignored. + if tag == "-" { + return fieldOptions{}, errIgnoredField + } + + // Check whether this field is unexported. + if !sf.IsExported() { + // In contrast to v1, v2 no longer forwards exported fields from + // embedded fields of unexported types since Go reflection does not + // allow the same set of operations that are available in normal cases + // of purely exported fields. + // See https://go.dev/issue/21357 and https://go.dev/issue/24153. + if sf.Anonymous { + return fieldOptions{}, fmt.Errorf("embedded Go struct field %s of an unexported type must be explicitly ignored with a `json:\"-\"` tag", sf.Type.Name()) + } + // Tag options specified on an unexported field suggests user error. + if hasTag { + return fieldOptions{}, fmt.Errorf("unexported Go struct field %s cannot have non-ignored `json:%q` tag", sf.Name, tag) + } + return fieldOptions{}, errIgnoredField + } + + // Determine the JSON member name for this Go field. A user-specified name + // may be provided as either an identifier or a single-quoted string. + // The single-quoted string allows arbitrary characters in the name. + // See https://go.dev/issue/2718 and https://go.dev/issue/3546. + out.name = sf.Name // always starts with an uppercase character + if len(tag) > 0 && !strings.HasPrefix(tag, ",") { + // For better compatibility with v1, accept almost any unescaped name. + n := len(tag) - len(strings.TrimLeftFunc(tag, func(r rune) bool { + return !strings.ContainsRune(",\\'\"`", r) // reserve comma, backslash, and quotes + })) + opt := tag[:n] + if n == 0 { + // Allow a single quoted string for arbitrary names. + opt, n, err = consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + } + out.hasName = true + out.name = opt + tag = tag[n:] + } + b, _ := appendString(nil, out.name, false, nil) + out.quotedName = string(b) + + // Handle any additional tag options (if any). + var wasFormat bool + seenOpts := make(map[string]bool) + for len(tag) > 0 { + // Consume comma delimiter. + if tag[0] != ',' { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid character %q before next option (expecting ',')", sf.Name, tag[0]) + } + tag = tag[len(","):] + if len(tag) == 0 { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid trailing ',' character", sf.Name) + } + + // Consume and process the tag option. 
+ opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + rawOpt := tag[:n] + tag = tag[n:] + switch { + case wasFormat: + return fieldOptions{}, fmt.Errorf("Go struct field %s has `format` tag option that was not specified last", sf.Name) + case strings.HasPrefix(rawOpt, "'") && strings.TrimFunc(opt, isLetterOrDigit) == "": + return fieldOptions{}, fmt.Errorf("Go struct field %s has unnecessarily quoted appearance of `%s` tag option; specify `%s` instead", sf.Name, rawOpt, opt) + } + switch opt { + case "nocase": + out.nocase = true + case "inline": + out.inline = true + case "unknown": + out.unknown = true + case "omitzero": + out.omitzero = true + case "omitempty": + out.omitempty = true + case "string": + out.string = true + case "format": + if !strings.HasPrefix(tag, ":") { + return fieldOptions{}, fmt.Errorf("Go struct field %s is missing value for `format` tag option", sf.Name) + } + tag = tag[len(":"):] + opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed value for `format` tag option: %v", sf.Name, err) + } + tag = tag[n:] + out.format = opt + wasFormat = true + default: + // Reject keys that resemble one of the supported options. + // This catches invalid mutants such as "omitEmpty" or "omit_empty". + normOpt := strings.ReplaceAll(strings.ToLower(opt), "_", "") + switch normOpt { + case "nocase", "inline", "unknown", "omitzero", "omitempty", "string", "format": + return fieldOptions{}, fmt.Errorf("Go struct field %s has invalid appearance of `%s` tag option; specify `%s` instead", sf.Name, opt, normOpt) + } + + // NOTE: Everything else is ignored. This does not mean it is + // forward compatible to insert arbitrary tag options since + // a future version of this package may understand that tag. + } + + // Reject duplicates. + if seenOpts[opt] { + return fieldOptions{}, fmt.Errorf("Go struct field %s has duplicate appearance of `%s` tag option", sf.Name, rawOpt) + } + seenOpts[opt] = true + } + return out, nil +} + +func consumeTagOption(in string) (string, int, error) { + switch r, _ := utf8.DecodeRuneInString(in); { + // Option as a Go identifier. + case r == '_' || unicode.IsLetter(r): + n := len(in) - len(strings.TrimLeftFunc(in, isLetterOrDigit)) + return in[:n], n, nil + // Option as a single-quoted string. + case r == '\'': + // The grammar is nearly identical to a double-quoted Go string literal, + // but uses single quotes as the terminators. The reason for a custom + // grammar is because both backtick and double quotes cannot be used + // verbatim in a struct tag. + // + // Convert a single-quoted string to a double-quote string and rely on + // strconv.Unquote to handle the rest. + var inEscape bool + b := []byte{'"'} + n := len(`'`) + for len(in) > n { + r, rn := utf8.DecodeRuneInString(in[n:]) + switch { + case inEscape: + if r == '\'' { + b = b[:len(b)-1] // remove escape character: `\'` => `'` + } + inEscape = false + case r == '\\': + inEscape = true + case r == '"': + b = append(b, '\\') // insert escape character: `"` => `\"` + case r == '\'': + b = append(b, '"') + n += len(`'`) + out, err := strconv.Unquote(string(b)) + if err != nil { + return "", 0, fmt.Errorf("invalid single-quoted string: %s", in[:n]) + } + return out, n, nil + } + b = append(b, in[n:][:rn]...) 
+ n += rn + } + if n > 10 { + n = 10 // limit the amount of context printed in the error + } + return "", 0, fmt.Errorf("single-quoted string not terminated: %s...", in[:n]) + case len(in) == 0: + return "", 0, io.ErrUnexpectedEOF + default: + return "", 0, fmt.Errorf("invalid character %q at start of option (expecting Unicode letter or single quote)", r) + } +} + +func isLetterOrDigit(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go new file mode 100644 index 000000000..9ab735814 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "unicode" + "unicode/utf8" +) + +// foldName returns a folded string such that foldName(x) == foldName(y) +// is similar to strings.EqualFold(x, y), but ignores underscore and dashes. +// This allows foldName to match common naming conventions. +func foldName(in []byte) []byte { + // This is inlinable to take advantage of "function outlining". + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + var arr [32]byte // large enough for most JSON names + return appendFoldedName(arr[:0], in) +} +func appendFoldedName(out, in []byte) []byte { + for i := 0; i < len(in); { + // Handle single-byte ASCII. + if c := in[i]; c < utf8.RuneSelf { + if c != '_' && c != '-' { + if 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + out = append(out, c) + } + i++ + continue + } + // Handle multi-byte Unicode. + r, n := utf8.DecodeRune(in[i:]) + out = utf8.AppendRune(out, foldRune(r)) + i += n + } + return out +} + +// foldRune is a variation on unicode.SimpleFold that returns the same rune +// for all runes in the same fold set. +// +// Invariant: +// +// foldRune(x) == foldRune(y) ⇔ strings.EqualFold(string(x), string(y)) +func foldRune(r rune) rune { + for { + r2 := unicode.SimpleFold(r) + if r2 <= r { + return r2 // smallest character in the fold set + } + r = r2 + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go new file mode 100644 index 000000000..700a56db0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/binary" + "math/bits" +) + +// stringCache is a cache for strings converted from a []byte. +type stringCache [256]string // 256*unsafe.Sizeof(string("")) => 4KiB + +// make returns the string form of b. +// It returns a pre-allocated string from c if present, otherwise +// it allocates a new string, inserts it into the cache, and returns it. +func (c *stringCache) make(b []byte) string { + const ( + minCachedLen = 2 // single byte strings are already interned by the runtime + maxCachedLen = 256 // large enough for UUIDs, IPv6 addresses, SHA-256 checksums, etc. 
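foldName gives `nocase` matching its semantics: two names match when they are equal under Unicode case folding once underscores and dashes are ignored. A tiny standalone equivalent:

package main

import (
	"fmt"
	"strings"
)

// foldEqual reports whether two JSON names match under the nocase rule:
// Unicode case folding, ignoring '_' and '-'.
func foldEqual(x, y string) bool {
	strip := func(r rune) rune {
		if r == '_' || r == '-' {
			return -1 // negative return drops the rune
		}
		return r
	}
	return strings.EqualFold(strings.Map(strip, x), strings.Map(strip, y))
}

func main() {
	fmt.Println(foldEqual("first_name", "FirstName")) // true
	fmt.Println(foldEqual("FIRST-NAME", "firstname")) // true
	fmt.Println(foldEqual("firstname", "lastname"))   // false
}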
+ ) + if c == nil || len(b) < minCachedLen || len(b) > maxCachedLen { + return string(b) + } + + // Compute a hash from the fixed-width prefix and suffix of the string. + // This ensures hashing a string is a constant time operation. + var h uint32 + switch { + case len(b) >= 8: + lo := binary.LittleEndian.Uint64(b[:8]) + hi := binary.LittleEndian.Uint64(b[len(b)-8:]) + h = hash64(uint32(lo), uint32(lo>>32)) ^ hash64(uint32(hi), uint32(hi>>32)) + case len(b) >= 4: + lo := binary.LittleEndian.Uint32(b[:4]) + hi := binary.LittleEndian.Uint32(b[len(b)-4:]) + h = hash64(lo, hi) + case len(b) >= 2: + lo := binary.LittleEndian.Uint16(b[:2]) + hi := binary.LittleEndian.Uint16(b[len(b)-2:]) + h = hash64(uint32(lo), uint32(hi)) + } + + // Check the cache for the string. + i := h % uint32(len(*c)) + if s := (*c)[i]; s == string(b) { + return s + } + s := string(b) + (*c)[i] = s + return s +} + +// hash64 returns the hash of two uint32s as a single uint32. +func hash64(lo, hi uint32) uint32 { + // If avalanche=true, this is identical to XXH32 hash on a 8B string: + // var b [8]byte + // binary.LittleEndian.PutUint32(b[:4], lo) + // binary.LittleEndian.PutUint32(b[4:], hi) + // return xxhash.Sum32(b[:]) + const ( + prime1 = 0x9e3779b1 + prime2 = 0x85ebca77 + prime3 = 0xc2b2ae3d + prime4 = 0x27d4eb2f + prime5 = 0x165667b1 + ) + h := prime5 + uint32(8) + h += lo * prime3 + h = bits.RotateLeft32(h, 17) * prime4 + h += hi * prime3 + h = bits.RotateLeft32(h, 17) * prime4 + // Skip final mix (avalanche) step of XXH32 for performance reasons. + // Empirical testing shows that the improvements in unbiased distribution + // does not outweigh the extra cost in computational complexity. + const avalanche = false + if avalanche { + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + } + return h +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go new file mode 100644 index 000000000..60e93270f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go @@ -0,0 +1,182 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "io" + "math/bits" + "sort" + "sync" +) + +// TODO(https://go.dev/issue/47657): Use sync.PoolOf. + +var ( + // This owns the internal buffer since there is no io.Writer to output to. + // Since the buffer can get arbitrarily large in normal usage, + // there is statistical tracking logic to determine whether to recycle + // the internal buffer or not based on a history of utilization. + bufferedEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} + + // This owns the internal buffer, but it is only used to temporarily store + // buffered JSON before flushing it to the underlying io.Writer. + // In a sufficiently efficient streaming mode, we do not expect the buffer + // to grow arbitrarily large. Thus, we avoid recycling large buffers. + streamingEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} + + // This does not own the internal buffer since + // it is taken directly from the provided bytes.Buffer. + bytesBufferEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} +) + +// bufferStatistics is statistics to track buffer utilization. 
+// It is used to determine whether to recycle a buffer or not +// to avoid https://go.dev/issue/23199. +type bufferStatistics struct { + strikes int // number of times the buffer was under-utilized + prevLen int // length of previous buffer +} + +func getBufferedEncoder(o EncodeOptions) *Encoder { + e := bufferedEncoderPool.Get().(*Encoder) + if e.buf == nil { + // Round up to nearest 2ⁿ to make best use of malloc size classes. + // See runtime/sizeclasses.go on Go1.15. + // Logical OR with 63 to ensure 64 as the minimum buffer size. + n := 1 << bits.Len(uint(e.bufStats.prevLen|63)) + e.buf = make([]byte, 0, n) + } + e.reset(e.buf[:0], nil, o) + return e +} +func putBufferedEncoder(e *Encoder) { + // Recycle large buffers only if sufficiently utilized. + // If a buffer is under-utilized enough times sequentially, + // then it is discarded, ensuring that a single large buffer + // won't be kept alive by a continuous stream of small usages. + // + // The worst case utilization is computed as: + // MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES) + // + // For the constants chosen below, this is (25%)/(1+4) ⇒ 5%. + // This may seem low, but it ensures a lower bound on + // the absolute worst-case utilization. Without this check, + // this would be theoretically 0%, which is infinitely worse. + // + // See https://go.dev/issue/27735. + switch { + case cap(e.buf) <= 4<<10: // always recycle buffers smaller than 4KiB + e.bufStats.strikes = 0 + case cap(e.buf)/4 <= len(e.buf): // at least 25% utilization + e.bufStats.strikes = 0 + case e.bufStats.strikes < 4: // at most 4 strikes + e.bufStats.strikes++ + default: // discard the buffer; too large and too often under-utilized + e.bufStats.strikes = 0 + e.bufStats.prevLen = len(e.buf) // heuristic for size to allocate next time + e.buf = nil + } + bufferedEncoderPool.Put(e) +} + +func getStreamingEncoder(w io.Writer, o EncodeOptions) *Encoder { + if _, ok := w.(*bytes.Buffer); ok { + e := bytesBufferEncoderPool.Get().(*Encoder) + e.reset(nil, w, o) // buffer taken from bytes.Buffer + return e + } else { + e := streamingEncoderPool.Get().(*Encoder) + e.reset(e.buf[:0], w, o) // preserve existing buffer + return e + } +} +func putStreamingEncoder(e *Encoder) { + if _, ok := e.wr.(*bytes.Buffer); ok { + bytesBufferEncoderPool.Put(e) + } else { + if cap(e.buf) > 64<<10 { + e.buf = nil // avoid pinning arbitrarily large amounts of memory + } + streamingEncoderPool.Put(e) + } +} + +var ( + // This does not own the internal buffer since it is externally provided. + bufferedDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }} + + // This owns the internal buffer, but it is only used to temporarily store + // buffered JSON fetched from the underlying io.Reader. + // In a sufficiently efficient streaming mode, we do not expect the buffer + // to grow arbitrarily large. Thus, we avoid recycling large buffers. + streamingDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }} + + // This does not own the internal buffer since + // it is taken directly from the provided bytes.Buffer. 
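putBufferedEncoder only recycles a large buffer while it stays reasonably utilized; after four consecutive under-utilized uses it is dropped, which bounds the worst-case utilization at 25%/(1+4), i.e. 5%. The policy in isolation:

package main

import "fmt"

// recycle decides whether to keep a buffer of capacity c after a use
// that filled n bytes, tracking consecutive under-utilizations in strikes.
func recycle(c, n int, strikes *int) bool {
	switch {
	case c <= 4<<10: // small buffers are always cheap to keep
		*strikes = 0
	case c/4 <= n: // at least 25% utilized
		*strikes = 0
	case *strikes < 4: // tolerate up to 4 poor uses in a row
		*strikes++
	default:
		*strikes = 0
		return false // drop the buffer
	}
	return true
}

func main() {
	strikes := 0
	for i := 1; i <= 5; i++ {
		kept := recycle(1<<20, 100, &strikes) // 1 MiB buffer, barely utilized
		fmt.Printf("use %d: kept=%v strikes=%d\n", i, kept, strikes)
	}
	// The buffer survives four under-utilized uses and is dropped on the fifth.
}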
+ bytesBufferDecoderPool = bufferedDecoderPool +) + +func getBufferedDecoder(b []byte, o DecodeOptions) *Decoder { + d := bufferedDecoderPool.Get().(*Decoder) + d.reset(b, nil, o) + return d +} +func putBufferedDecoder(d *Decoder) { + bufferedDecoderPool.Put(d) +} + +func getStreamingDecoder(r io.Reader, o DecodeOptions) *Decoder { + if _, ok := r.(*bytes.Buffer); ok { + d := bytesBufferDecoderPool.Get().(*Decoder) + d.reset(nil, r, o) // buffer taken from bytes.Buffer + return d + } else { + d := streamingDecoderPool.Get().(*Decoder) + d.reset(d.buf[:0], r, o) // preserve existing buffer + return d + } +} +func putStreamingDecoder(d *Decoder) { + if _, ok := d.rd.(*bytes.Buffer); ok { + bytesBufferDecoderPool.Put(d) + } else { + if cap(d.buf) > 64<<10 { + d.buf = nil // avoid pinning arbitrarily large amounts of memory + } + streamingDecoderPool.Put(d) + } +} + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. +func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go new file mode 100644 index 000000000..ee14c753f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -0,0 +1,747 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "strconv" +) + +var ( + errMissingName = &SyntacticError{str: "missing string for object name"} + errMissingColon = &SyntacticError{str: "missing character ':' after object name"} + errMissingValue = &SyntacticError{str: "missing value after object name"} + errMissingComma = &SyntacticError{str: "missing character ',' after object or array value"} + errMismatchDelim = &SyntacticError{str: "mismatching structural token for object or array"} +) + +const errInvalidNamespace = jsonError("object namespace is in an invalid state") + +type state struct { + // tokens validates whether the next token kind is valid. + tokens stateMachine + + // names is a stack of object names. + // Not used if AllowDuplicateNames is true. + names objectNameStack + + // namespaces is a stack of object namespaces. + // For performance reasons, Encoder or Decoder may not update this + // if Marshal or Unmarshal is able to track names in a more efficient way. + // See makeMapArshaler and makeStructArshaler. + // Not used if AllowDuplicateNames is true. 
+ namespaces objectNamespaceStack +} + +func (s *state) reset() { + s.tokens.reset() + s.names.reset() + s.namespaces.reset() +} + +// appendStackPointer appends a JSON Pointer (RFC 6901) to the current value. +// The returned pointer is only accurate if s.names is populated, +// otherwise it uses the numeric index as the object member name. +// +// Invariant: Must call s.names.copyQuotedBuffer beforehand. +func (s state) appendStackPointer(b []byte) []byte { + var objectDepth int + for i := 1; i < s.tokens.depth(); i++ { + e := s.tokens.index(i) + if e.length() == 0 { + break // empty object or array + } + b = append(b, '/') + switch { + case e.isObject(): + if objectDepth < s.names.length() { + for _, c := range s.names.getUnquoted(objectDepth) { + // Per RFC 6901, section 3, escape '~' and '/' characters. + switch c { + case '~': + b = append(b, "~0"...) + case '/': + b = append(b, "~1"...) + default: + b = append(b, c) + } + } + } else { + // Since the names stack is unpopulated, the name is unknown. + // As a best-effort replacement, use the numeric member index. + // While inaccurate, it produces a syntactically valid pointer. + b = strconv.AppendUint(b, uint64((e.length()-1)/2), 10) + } + objectDepth++ + case e.isArray(): + b = strconv.AppendUint(b, uint64(e.length()-1), 10) + } + } + return b +} + +// stateMachine is a push-down automaton that validates whether +// a sequence of tokens is valid or not according to the JSON grammar. +// It is useful for both encoding and decoding. +// +// It is a stack where each entry represents a nested JSON object or array. +// The stack has a minimum depth of 1 where the first level is a +// virtual JSON array to handle a stream of top-level JSON values. +// The top-level virtual JSON array is special in that it doesn't require commas +// between each JSON value. +// +// For performance, most methods are carefully written to be inlineable. +// The zero value is a valid state machine ready for use. +type stateMachine struct { + stack []stateEntry + last stateEntry +} + +// reset resets the state machine. +// The machine always starts with a minimum depth of 1. +func (m *stateMachine) reset() { + m.stack = m.stack[:0] + if cap(m.stack) > 1<<10 { + m.stack = nil + } + m.last = stateTypeArray +} + +// depth is the current nested depth of JSON objects and arrays. +// It is one-indexed (i.e., top-level values have a depth of 1). +func (m stateMachine) depth() int { + return len(m.stack) + 1 +} + +// index returns a reference to the ith entry. +// It is only valid until the next push method call. +func (m *stateMachine) index(i int) *stateEntry { + if i == len(m.stack) { + return &m.last + } + return &m.stack[i] +} + +// depthLength reports the current nested depth and +// the length of the last JSON object or array. +func (m stateMachine) depthLength() (int, int) { + return m.depth(), m.last.length() +} + +// appendLiteral appends a JSON literal as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendLiteral() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendString appends a JSON string as the next token in the sequence. +// If an error is returned, the state is not mutated. 
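appendStackPointer escapes each member name per RFC 6901, section 3 before joining names with '/': '~' becomes "~0" and '/' becomes "~1". For example:

package main

import (
	"fmt"
	"strings"
)

// escapeJSONPointer applies the RFC 6901, section 3 escaping rules
// to a single object member name ('~' must be escaped before '/').
func escapeJSONPointer(name string) string {
	name = strings.ReplaceAll(name, "~", "~0")
	name = strings.ReplaceAll(name, "/", "~1")
	return name
}

func main() {
	// A value at obj["a/b"]["m~n"][2] is addressed as:
	ptr := "/" + escapeJSONPointer("a/b") + "/" + escapeJSONPointer("m~n") + "/2"
	fmt.Println(ptr) // /a~1b/m~0n/2
}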
+func (m *stateMachine) appendString() error { + switch { + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendNumber appends a JSON number as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendNumber() error { + return m.appendLiteral() +} + +// pushObject appends a JSON start object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushObject() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeObject + return nil + } +} + +// popObject appends a JSON end object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popObject() error { + switch { + case !m.last.isObject(): + return errMismatchDelim + case m.last.needObjectValue(): + return errMissingValue + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// pushArray appends a JSON start array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushArray() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeArray + return nil + } +} + +// popArray appends a JSON end array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popArray() error { + switch { + case !m.last.isArray() || len(m.stack) == 0: // forbid popping top-level virtual JSON array + return errMismatchDelim + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// needIndent reports whether indent whitespace should be injected. +// A zero value means that no whitespace should be injected. +// A positive value means '\n', indentPrefix, and (n-1) copies of indentBody +// should be appended to the output immediately before the next token. +func (m stateMachine) needIndent(next Kind) (n int) { + willEnd := next == '}' || next == ']' + switch { + case m.depth() == 1: + return 0 // top-level values are never indented + case m.last.length() == 0 && willEnd: + return 0 // an empty object or array is never indented + case m.last.length() == 0 || m.last.needImplicitComma(next): + return m.depth() + case willEnd: + return m.depth() - 1 + default: + return 0 + } +} + +// mayAppendDelim appends a colon or comma that may precede the next token. +func (m stateMachine) mayAppendDelim(b []byte, next Kind) []byte { + switch { + case m.last.needImplicitColon(): + return append(b, ':') + case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values + return append(b, ',') + default: + return b + } +} + +// needDelim reports whether a colon or comma token should be implicitly emitted +// before the next token of the specified kind. +// A zero value means no delimiter should be emitted. 
+func (m stateMachine) needDelim(next Kind) (delim byte) { + switch { + case m.last.needImplicitColon(): + return ':' + case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values + return ',' + default: + return 0 + } +} + +// checkDelim reports whether the specified delimiter should be there given +// the kind of the next token that appears immediately afterwards. +func (m stateMachine) checkDelim(delim byte, next Kind) error { + switch needDelim := m.needDelim(next); { + case needDelim == delim: + return nil + case needDelim == ':': + return errMissingColon + case needDelim == ',': + return errMissingComma + default: + return newInvalidCharacterError([]byte{delim}, "before next token") + } +} + +// invalidateDisabledNamespaces marks all disabled namespaces as invalid. +// +// For efficiency, Marshal and Unmarshal may disable namespaces since there are +// more efficient ways to track duplicate names. However, if an error occurs, +// the namespaces in Encoder or Decoder will be left in an inconsistent state. +// Mark the namespaces as invalid so that future method calls on +// Encoder or Decoder will return an error. +func (m *stateMachine) invalidateDisabledNamespaces() { + for i := 0; i < m.depth(); i++ { + e := m.index(i) + if !e.isActiveNamespace() { + e.invalidateNamespace() + } + } +} + +// stateEntry encodes several artifacts within a single unsigned integer: +// - whether this represents a JSON object or array, +// - whether this object should check for duplicate names, and +// - how many elements are in this JSON object or array. +type stateEntry uint64 + +const ( + // The type mask (1 bit) records whether this is a JSON object or array. + stateTypeMask stateEntry = 0x8000_0000_0000_0000 + stateTypeObject stateEntry = 0x8000_0000_0000_0000 + stateTypeArray stateEntry = 0x0000_0000_0000_0000 + + // The name check mask (2 bit) records whether to update + // the namespaces for the current JSON object and + // whether the namespace is valid. + stateNamespaceMask stateEntry = 0x6000_0000_0000_0000 + stateDisableNamespace stateEntry = 0x4000_0000_0000_0000 + stateInvalidNamespace stateEntry = 0x2000_0000_0000_0000 + + // The count mask (61 bits) records the number of elements. + stateCountMask stateEntry = 0x1fff_ffff_ffff_ffff + stateCountLSBMask stateEntry = 0x0000_0000_0000_0001 + stateCountOdd stateEntry = 0x0000_0000_0000_0001 + stateCountEven stateEntry = 0x0000_0000_0000_0000 +) + +// length reports the number of elements in the JSON object or array. +// Each name and value in an object entry is treated as a separate element. +func (e stateEntry) length() int { + return int(e & stateCountMask) +} + +// isObject reports whether this is a JSON object. +func (e stateEntry) isObject() bool { + return e&stateTypeMask == stateTypeObject +} + +// isArray reports whether this is a JSON array. +func (e stateEntry) isArray() bool { + return e&stateTypeMask == stateTypeArray +} + +// needObjectName reports whether the next token must be a JSON string, +// which is necessary for JSON object names. +func (e stateEntry) needObjectName() bool { + return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountEven +} + +// needImplicitColon reports whether an colon should occur next, +// which always occurs after JSON object names. +func (e stateEntry) needImplicitColon() bool { + return e.needObjectValue() +} + +// needObjectValue reports whether the next token must be a JSON value, +// which is necessary after every JSON object name. 
+func (e stateEntry) needObjectValue() bool {
+ return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountOdd
+}
+
+// needImplicitComma reports whether a comma should occur next,
+// which always occurs after a value in a JSON object or array
+// before the next value (or name).
+func (e stateEntry) needImplicitComma(next Kind) bool {
+ return !e.needObjectValue() && e.length() > 0 && next != '}' && next != ']'
+}
+
+// increment increments the number of elements for the current object or array.
+// This assumes that overflow won't practically be an issue since
+// 1<<bits.OnesCount(stateCountMask) is sufficiently large.
+func (e *stateEntry) increment() {
+ (*e)++
+}
+
+// decrement decrements the number of elements for the current object or array.
+// It is the caller's responsibility to ensure that e.length > 0.
+func (e *stateEntry) decrement() {
+ (*e)--
+}
+
+// disableNamespace disables the JSON object namespace such that the
+// Encoder or Decoder no longer updates the namespace.
+func (e *stateEntry) disableNamespace() {
+ *e |= stateDisableNamespace
+}
+
+// isActiveNamespace reports whether the JSON object namespace is actively
+// being updated and used for duplicate name checks.
+func (e stateEntry) isActiveNamespace() bool {
+ return e&(stateDisableNamespace) == 0
+}
+
+// invalidateNamespace marks the JSON object namespace as being invalid.
+func (e *stateEntry) invalidateNamespace() {
+ *e |= stateInvalidNamespace
+}
+
+// isValidNamespace reports whether the JSON object namespace is valid.
+func (e stateEntry) isValidNamespace() bool {
+ return e&(stateInvalidNamespace) == 0
+}
+
+// objectNameStack is a stack of names when descending into a JSON object.
+// In contrast to objectNamespaceStack, this only has to remember a single name
+// per JSON object.
+//
+// This data structure may contain offsets to encodeBuffer or decodeBuffer.
+// It violates clean abstraction of layers, but is significantly more efficient.
+// This ensures that popping and pushing in the common case is a trivial
+// push/pop of an offset integer.
+//
+// The zero value is an empty names stack ready for use.
+type objectNameStack struct {
+ // offsets is a stack of offsets for each name.
+ // A non-negative offset is the ending offset into the local names buffer.
+ // A negative offset is the bit-wise inverse of a starting offset into
+ // a remote buffer (e.g., encodeBuffer or decodeBuffer).
+ // A math.MinInt offset at the end implies that the last object is empty.
+ // Invariant: Positive offsets always occur before negative offsets.
+ offsets []int
+ // unquotedNames is a back-to-back concatenation of names.
+ unquotedNames []byte
+}
+
+func (ns *objectNameStack) reset() {
+ ns.offsets = ns.offsets[:0]
+ ns.unquotedNames = ns.unquotedNames[:0]
+ if cap(ns.offsets) > 1<<6 {
+ ns.offsets = nil // avoid pinning arbitrarily large amounts of memory
+ }
+ if cap(ns.unquotedNames) > 1<<10 {
+ ns.unquotedNames = nil // avoid pinning arbitrarily large amounts of memory
+ }
+}
+
+func (ns *objectNameStack) length() int {
+ return len(ns.offsets)
+}
+
+// getUnquoted retrieves the ith unquoted name in the namespace.
+// It returns an empty string if the last object is empty.
+//
+// Invariant: Must call copyQuotedBuffer beforehand.
+func (ns *objectNameStack) getUnquoted(i int) []byte {
+ ns.ensureCopiedBuffer()
+ if i == 0 {
+ return ns.unquotedNames[:ns.offsets[0]]
+ } else {
+ return ns.unquotedNames[ns.offsets[i-1]:ns.offsets[i-0]]
+ }
+}
+
+// invalidOffset indicates that the last JSON object currently has no name.
+const invalidOffset = math.MinInt
+
+// push descends into a nested JSON object.
+func (ns *objectNameStack) push() { + ns.offsets = append(ns.offsets, invalidOffset) +} + +// replaceLastQuotedOffset replaces the last name with the starting offset +// to the quoted name in some remote buffer. All offsets provided must be +// relative to the same buffer until copyQuotedBuffer is called. +func (ns *objectNameStack) replaceLastQuotedOffset(i int) { + // Use bit-wise inversion instead of naive multiplication by -1 to avoid + // ambiguity regarding zero (which is a valid offset into the names field). + // Bit-wise inversion is mathematically equivalent to -i-1, + // such that 0 becomes -1, 1 becomes -2, and so forth. + // This ensures that remote offsets are always negative. + ns.offsets[len(ns.offsets)-1] = ^i +} + +// replaceLastUnquotedName replaces the last name with the provided name. +// +// Invariant: Must call copyQuotedBuffer beforehand. +func (ns *objectNameStack) replaceLastUnquotedName(s string) { + ns.ensureCopiedBuffer() + var startOffset int + if len(ns.offsets) > 1 { + startOffset = ns.offsets[len(ns.offsets)-2] + } + ns.unquotedNames = append(ns.unquotedNames[:startOffset], s...) + ns.offsets[len(ns.offsets)-1] = len(ns.unquotedNames) +} + +// clearLast removes any name in the last JSON object. +// It is semantically equivalent to ns.push followed by ns.pop. +func (ns *objectNameStack) clearLast() { + ns.offsets[len(ns.offsets)-1] = invalidOffset +} + +// pop ascends out of a nested JSON object. +func (ns *objectNameStack) pop() { + ns.offsets = ns.offsets[:len(ns.offsets)-1] +} + +// copyQuotedBuffer copies names from the remote buffer into the local names +// buffer so that there are no more offset references into the remote buffer. +// This allows the remote buffer to change contents without affecting +// the names that this data structure is trying to remember. +func (ns *objectNameStack) copyQuotedBuffer(b []byte) { + // Find the first negative offset. + var i int + for i = len(ns.offsets) - 1; i >= 0 && ns.offsets[i] < 0; i-- { + continue + } + + // Copy each name from the remote buffer into the local buffer. + for i = i + 1; i < len(ns.offsets); i++ { + if i == len(ns.offsets)-1 && ns.offsets[i] == invalidOffset { + if i == 0 { + ns.offsets[i] = 0 + } else { + ns.offsets[i] = ns.offsets[i-1] + } + break // last JSON object had a push without any names + } + + // As a form of Hyrum proofing, we write an invalid character into the + // buffer to make misuse of Decoder.ReadToken more obvious. + // We need to undo that mutation here. + quotedName := b[^ns.offsets[i]:] + if quotedName[0] == invalidateBufferByte { + quotedName[0] = '"' + } + + // Append the unquoted name to the local buffer. + var startOffset int + if i > 0 { + startOffset = ns.offsets[i-1] + } + if n := consumeSimpleString(quotedName); n > 0 { + ns.unquotedNames = append(ns.unquotedNames[:startOffset], quotedName[len(`"`):n-len(`"`)]...) + } else { + ns.unquotedNames, _ = unescapeString(ns.unquotedNames[:startOffset], quotedName) + } + ns.offsets[i] = len(ns.unquotedNames) + } +} + +func (ns *objectNameStack) ensureCopiedBuffer() { + if len(ns.offsets) > 0 && ns.offsets[len(ns.offsets)-1] < 0 { + panic("BUG: copyQuotedBuffer not called beforehand") + } +} + +// objectNamespaceStack is a stack of object namespaces. +// This data structure assists in detecting duplicate names. +type objectNamespaceStack []objectNamespace + +// reset resets the object namespace stack. 
+func (nss *objectNamespaceStack) reset() { + if cap(*nss) > 1<<10 { + *nss = nil + } + *nss = (*nss)[:0] +} + +// push starts a new namespace for a nested JSON object. +func (nss *objectNamespaceStack) push() { + if cap(*nss) > len(*nss) { + *nss = (*nss)[:len(*nss)+1] + nss.last().reset() + } else { + *nss = append(*nss, objectNamespace{}) + } +} + +// last returns a pointer to the last JSON object namespace. +func (nss objectNamespaceStack) last() *objectNamespace { + return &nss[len(nss)-1] +} + +// pop terminates the namespace for a nested JSON object. +func (nss *objectNamespaceStack) pop() { + *nss = (*nss)[:len(*nss)-1] +} + +// objectNamespace is the namespace for a JSON object. +// In contrast to objectNameStack, this needs to remember a all names +// per JSON object. +// +// The zero value is an empty namespace ready for use. +type objectNamespace struct { + // It relies on a linear search over all the names before switching + // to use a Go map for direct lookup. + + // endOffsets is a list of offsets to the end of each name in buffers. + // The length of offsets is the number of names in the namespace. + endOffsets []uint + // allUnquotedNames is a back-to-back concatenation of every name in the namespace. + allUnquotedNames []byte + // mapNames is a Go map containing every name in the namespace. + // Only valid if non-nil. + mapNames map[string]struct{} +} + +// reset resets the namespace to be empty. +func (ns *objectNamespace) reset() { + ns.endOffsets = ns.endOffsets[:0] + ns.allUnquotedNames = ns.allUnquotedNames[:0] + ns.mapNames = nil + if cap(ns.endOffsets) > 1<<6 { + ns.endOffsets = nil // avoid pinning arbitrarily large amounts of memory + } + if cap(ns.allUnquotedNames) > 1<<10 { + ns.allUnquotedNames = nil // avoid pinning arbitrarily large amounts of memory + } +} + +// length reports the number of names in the namespace. +func (ns *objectNamespace) length() int { + return len(ns.endOffsets) +} + +// getUnquoted retrieves the ith unquoted name in the namespace. +func (ns *objectNamespace) getUnquoted(i int) []byte { + if i == 0 { + return ns.allUnquotedNames[:ns.endOffsets[0]] + } else { + return ns.allUnquotedNames[ns.endOffsets[i-1]:ns.endOffsets[i-0]] + } +} + +// lastUnquoted retrieves the last name in the namespace. +func (ns *objectNamespace) lastUnquoted() []byte { + return ns.getUnquoted(ns.length() - 1) +} + +// insertQuoted inserts a name and reports whether it was inserted, +// which only occurs if name is not already in the namespace. +// The provided name must be a valid JSON string. +func (ns *objectNamespace) insertQuoted(name []byte, isVerbatim bool) bool { + if isVerbatim { + name = name[len(`"`) : len(name)-len(`"`)] + } + return ns.insert(name, !isVerbatim) +} +func (ns *objectNamespace) insertUnquoted(name []byte) bool { + return ns.insert(name, false) +} +func (ns *objectNamespace) insert(name []byte, quoted bool) bool { + var allNames []byte + if quoted { + allNames, _ = unescapeString(ns.allUnquotedNames, name) + } else { + allNames = append(ns.allUnquotedNames, name...) + } + name = allNames[len(ns.allUnquotedNames):] + + // Switch to a map if the buffer is too large for linear search. + // This does not add the current name to the map. 
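+ // For example, once the namespace already holds more than 64 names
+ // (or more than 1024 bytes of name data), the entries recorded in
+ // endOffsets/allUnquotedNames are copied into mapNames below, and
+ // later lookups become O(1) map hits instead of a linear scan.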
+ if ns.mapNames == nil && (ns.length() > 64 || len(ns.allUnquotedNames) > 1024) {
+ ns.mapNames = make(map[string]struct{})
+ var startOffset uint
+ for _, endOffset := range ns.endOffsets {
+ name := ns.allUnquotedNames[startOffset:endOffset]
+ ns.mapNames[string(name)] = struct{}{} // allocates a new string
+ startOffset = endOffset
+ }
+ }
+
+ if ns.mapNames == nil {
+ // Perform linear search over the buffer to find matching names.
+ // It provides O(n) lookup, but does not require any allocations.
+ var startOffset uint
+ for _, endOffset := range ns.endOffsets {
+ if string(ns.allUnquotedNames[startOffset:endOffset]) == string(name) {
+ return false
+ }
+ startOffset = endOffset
+ }
+ } else {
+ // Use the map if it is populated.
+ // It provides O(1) lookup, but requires a string allocation per name.
+ if _, ok := ns.mapNames[string(name)]; ok {
+ return false
+ }
+ ns.mapNames[string(name)] = struct{}{} // allocates a new string
+ }
+
+ ns.allUnquotedNames = allNames
+ ns.endOffsets = append(ns.endOffsets, uint(len(ns.allUnquotedNames)))
+ return true
+}
+
+// removeLast removes the last name in the namespace.
+func (ns *objectNamespace) removeLast() {
+ if ns.mapNames != nil {
+ delete(ns.mapNames, string(ns.lastUnquoted()))
+ }
+ if ns.length()-1 == 0 {
+ ns.endOffsets = ns.endOffsets[:0]
+ ns.allUnquotedNames = ns.allUnquotedNames[:0]
+ } else {
+ ns.endOffsets = ns.endOffsets[:ns.length()-1]
+ ns.allUnquotedNames = ns.allUnquotedNames[:ns.endOffsets[ns.length()-1]]
+ }
+}
+
+type uintSet64 uint64
+
+func (s uintSet64) has(i uint) bool { return s&(1<<i) > 0 }
+func (s *uintSet64) set(i uint) { *s |= 1 << i }
+
+// uintSet is a set of unsigned integers.
+// It is optimized for most integers being close to zero.
+type uintSet struct {
+ lo uintSet64
+ hi []uintSet64
+}
+
+// has reports whether i is in the set.
+func (s *uintSet) has(i uint) bool {
+ if i < 64 {
+ return s.lo.has(i)
+ } else {
+ i -= 64
+ iHi, iLo := int(i/64), i%64
+ return iHi < len(s.hi) && s.hi[iHi].has(iLo)
+ }
+}
+
+// insert inserts i into the set and reports whether it was the first insertion.
+func (s *uintSet) insert(i uint) bool {
+ // TODO: Make this inlineable at least for the lower 64-bit case.
+ if i < 64 {
+ has := s.lo.has(i)
+ s.lo.set(i)
+ return !has
+ } else {
+ i -= 64
+ iHi, iLo := int(i/64), i%64
+ if iHi >= len(s.hi) {
+ s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...)
+ s.hi = s.hi[:cap(s.hi)]
+ }
+ has := s.hi[iHi].has(iLo)
+ s.hi[iHi].set(iLo)
+ return !has
+ }
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
new file mode 100644
index 000000000..9acba7dad
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
@@ -0,0 +1,522 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "math"
+ "strconv"
+)
+
+// NOTE: Token is analogous to v1 json.Token.
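+//
+// Unlike v1, where Decoder.Token returns an interface value, this package
+// models tokens as a concrete struct whose Kind reports what was read.
+// A minimal sketch of token-by-token decoding, assuming the upstream module
+// github.com/go-json-experiment/json (from which this file is vendored) and
+// its exported NewDecoder/ReadToken API as of this snapshot (imports: fmt,
+// io, log, strings):
+//
+//    d := json.NewDecoder(strings.NewReader(`{"fizz":"buzz","n":[1,2,3]}`))
+//    for {
+//        tok, err := d.ReadToken()
+//        if err == io.EOF {
+//            break
+//        }
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        // Kind is the first byte of the token's grammar: '{', '"', '0', etc.
+//        fmt.Printf("%v %v\n", tok.Kind(), tok)
+//    }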
+ +const ( + maxInt64 = math.MaxInt64 + minInt64 = math.MinInt64 + maxUint64 = math.MaxUint64 + minUint64 = 0 // for consistency and readability purposes + + invalidTokenPanic = "invalid json.Token; it has been voided by a subsequent json.Decoder call" +) + +// Token represents a lexical JSON token, which may be one of the following: +// - a JSON literal (i.e., null, true, or false) +// - a JSON string (e.g., "hello, world!") +// - a JSON number (e.g., 123.456) +// - a start or end delimiter for a JSON object (i.e., { or } ) +// - a start or end delimiter for a JSON array (i.e., [ or ] ) +// +// A Token cannot represent entire array or object values, while a RawValue can. +// There is no Token to represent commas and colons since +// these structural tokens can be inferred from the surrounding context. +type Token struct { + nonComparable + + // Tokens can exist in either a "raw" or an "exact" form. + // Tokens produced by the Decoder are in the "raw" form. + // Tokens returned by constructors are usually in the "exact" form. + // The Encoder accepts Tokens in either the "raw" or "exact" form. + // + // The following chart shows the possible values for each Token type: + // ╔═════════════════╦════════════╤════════════╤════════════╗ + // ║ Token type ║ raw field │ str field │ num field ║ + // ╠═════════════════╬════════════╪════════════╪════════════╣ + // ║ null (raw) ║ "null" │ "" │ 0 ║ + // ║ false (raw) ║ "false" │ "" │ 0 ║ + // ║ true (raw) ║ "true" │ "" │ 0 ║ + // ║ string (raw) ║ non-empty │ "" │ offset ║ + // ║ string (string) ║ nil │ non-empty │ 0 ║ + // ║ number (raw) ║ non-empty │ "" │ offset ║ + // ║ number (float) ║ nil │ "f" │ non-zero ║ + // ║ number (int64) ║ nil │ "i" │ non-zero ║ + // ║ number (uint64) ║ nil │ "u" │ non-zero ║ + // ║ object (delim) ║ "{" or "}" │ "" │ 0 ║ + // ║ array (delim) ║ "[" or "]" │ "" │ 0 ║ + // ╚═════════════════╩════════════╧════════════╧════════════╝ + // + // Notes: + // - For tokens stored in "raw" form, the num field contains the + // absolute offset determined by raw.previousOffsetStart(). + // The buffer itself is stored in raw.previousBuffer(). + // - JSON literals and structural characters are always in the "raw" form. + // - JSON strings and numbers can be in either "raw" or "exact" forms. + // - The exact zero value of JSON strings and numbers in the "exact" forms + // have ambiguous representation. Thus, they are always represented + // in the "raw" form. + + // raw contains a reference to the raw decode buffer. + // If non-nil, then its value takes precedence over str and num. + // It is only valid if num == raw.previousOffsetStart(). + raw *decodeBuffer + + // str is the unescaped JSON string if num is zero. + // Otherwise, it is "f", "i", or "u" if num should be interpreted + // as a float64, int64, or uint64, respectively. + str string + + // num is a float64, int64, or uint64 stored as a uint64 value. + // It is non-zero for any JSON number in the "exact" form. + num uint64 +} + +// TODO: Does representing 1-byte delimiters as *decodeBuffer cause performance issues? 
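+
+// The constructors below (Bool, String, Int, Uint, Float) produce Tokens in
+// the "exact" form of the table above, and an Encoder can write them back out
+// with colons and commas inferred from the surrounding context. A minimal
+// sketch, again assuming the upstream module's exported NewEncoder/WriteToken
+// API as of this snapshot (imports: log, os):
+//
+//    e := json.NewEncoder(os.Stdout)
+//    toks := []json.Token{
+//        json.ObjectStart,
+//        json.String("answer"), json.Int(42),
+//        json.String("ok"), json.Bool(true),
+//        json.ObjectEnd,
+//    }
+//    for _, tok := range toks {
+//        if err := e.WriteToken(tok); err != nil {
+//            log.Fatal(err)
+//        }
+//    }
+//    // Writes {"answer":42,"ok":true} followed by a newline.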
+ +var ( + Null Token = rawToken("null") + False Token = rawToken("false") + True Token = rawToken("true") + + ObjectStart Token = rawToken("{") + ObjectEnd Token = rawToken("}") + ArrayStart Token = rawToken("[") + ArrayEnd Token = rawToken("]") + + zeroString Token = rawToken(`""`) + zeroNumber Token = rawToken(`0`) + + nanString Token = String("NaN") + pinfString Token = String("Infinity") + ninfString Token = String("-Infinity") +) + +func rawToken(s string) Token { + return Token{raw: &decodeBuffer{buf: []byte(s), prevStart: 0, prevEnd: len(s)}} +} + +// Bool constructs a Token representing a JSON boolean. +func Bool(b bool) Token { + if b { + return True + } + return False +} + +// String constructs a Token representing a JSON string. +// The provided string should contain valid UTF-8, otherwise invalid characters +// may be mangled as the Unicode replacement character. +func String(s string) Token { + if len(s) == 0 { + return zeroString + } + return Token{str: s} +} + +// Float constructs a Token representing a JSON number. +// The values NaN, +Inf, and -Inf will be represented +// as a JSON string with the values "NaN", "Infinity", and "-Infinity". +func Float(n float64) Token { + switch { + case math.Float64bits(n) == 0: + return zeroNumber + case math.IsNaN(n): + return nanString + case math.IsInf(n, +1): + return pinfString + case math.IsInf(n, -1): + return ninfString + } + return Token{str: "f", num: math.Float64bits(n)} +} + +// Int constructs a Token representing a JSON number from an int64. +func Int(n int64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "i", num: uint64(n)} +} + +// Uint constructs a Token representing a JSON number from a uint64. +func Uint(n uint64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "u", num: uint64(n)} +} + +// Clone makes a copy of the Token such that its value remains valid +// even after a subsequent Decoder.Read call. +func (t Token) Clone() Token { + // TODO: Allow caller to avoid any allocations? + if raw := t.raw; raw != nil { + // Avoid copying globals. + if t.raw.prevStart == 0 { + switch t.raw { + case Null.raw: + return Null + case False.raw: + return False + case True.raw: + return True + case ObjectStart.raw: + return ObjectStart + case ObjectEnd.raw: + return ObjectEnd + case ArrayStart.raw: + return ArrayStart + case ArrayEnd.raw: + return ArrayEnd + } + } + + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + // TODO(https://go.dev/issue/45038): Use bytes.Clone. + buf := append([]byte(nil), raw.previousBuffer()...) + return Token{raw: &decodeBuffer{buf: buf, prevStart: 0, prevEnd: len(buf)}} + } + return t +} + +// Bool returns the value for a JSON boolean. +// It panics if the token kind is not a JSON boolean. +func (t Token) Bool() bool { + switch t.raw { + case True.raw: + return true + case False.raw: + return false + default: + panic("invalid JSON token kind: " + t.Kind().String()) + } +} + +// appendString appends a JSON string to dst and returns it. +// It panics if t is not a JSON string. +func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw string value. 
+ buf := raw.previousBuffer() + if Kind(buf[0]) == '"' { + if escapeRune == nil && consumeSimpleString(buf) == len(buf) { + return append(dst, buf...), nil + } + dst, _, err := reformatString(dst, buf, validateUTF8, preserveRaw, escapeRune) + return dst, err + } + } else if len(t.str) != 0 && t.num == 0 { + // Handle exact string value. + return appendString(dst, t.str, validateUTF8, escapeRune) + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// String returns the unescaped string value for a JSON string. +// For other JSON kinds, this returns the raw JSON representation. +func (t Token) String() string { + // This is inlinable to take advantage of "function outlining". + // This avoids an allocation for the string(b) conversion + // if the caller does not use the string in an escaping manner. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + s, b := t.string() + if len(b) > 0 { + return string(b) + } + return s +} +func (t Token) string() (string, []byte) { + if raw := t.raw; raw != nil { + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if buf[0] == '"' { + // TODO: Preserve valueFlags in Token? + isVerbatim := consumeSimpleString(buf) == len(buf) + return "", unescapeStringMayCopy(buf, isVerbatim) + } + // Handle tokens that are not JSON strings for fmt.Stringer. + return "", buf + } + if len(t.str) != 0 && t.num == 0 { + return t.str, nil + } + // Handle tokens that are not JSON strings for fmt.Stringer. + if t.num > 0 { + switch t.str[0] { + case 'f': + return string(appendNumber(nil, math.Float64frombits(t.num), 64)), nil + case 'i': + return strconv.FormatInt(int64(t.num), 10), nil + case 'u': + return strconv.FormatUint(uint64(t.num), 10), nil + } + } + return "", nil +} + +// appendNumber appends a JSON number to dst and returns it. +// It panics if t is not a JSON number. +func (t Token) appendNumber(dst []byte, canonicalize bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw number value. + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + if !canonicalize { + return append(dst, buf...), nil + } + dst, _, err := reformatNumber(dst, buf, canonicalize) + return dst, err + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return appendNumber(dst, math.Float64frombits(t.num), 64), nil + case 'i': + return strconv.AppendInt(dst, int64(t.num), 10), nil + case 'u': + return strconv.AppendUint(dst, uint64(t.num), 10), nil + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Float returns the floating-point value for a JSON number. +// It returns a NaN, +Inf, or -Inf value for any JSON string +// with the values "NaN", "Infinity", or "-Infinity". +// It panics for all other cases. +func (t Token) Float() float64 { + if raw := t.raw; raw != nil { + // Handle raw number value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + fv, _ := parseFloat(buf, 64) + return fv + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return math.Float64frombits(t.num) + case 'i': + return float64(int64(t.num)) + case 'u': + return float64(uint64(t.num)) + } + } + + // Handle string values with "NaN", "Infinity", or "-Infinity". 
+ if t.Kind() == '"' { + switch t.String() { + case "NaN": + return math.NaN() + case "Infinity": + return math.Inf(+1) + case "-Infinity": + return math.Inf(-1) + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Int returns the signed integer value for a JSON number. +// The fractional component of any number is ignored (truncation toward zero). +// Any number beyond the representation of an int64 will be saturated +// to the closest representable value. +// It panics if the token kind is not a JSON number. +func (t Token) Int() int64 { + if raw := t.raw; raw != nil { + // Handle raw integer value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + neg := false + buf := raw.previousBuffer() + if len(buf) > 0 && buf[0] == '-' { + neg, buf = true, buf[1:] + } + if numAbs, ok := parseDecUint(buf); ok { + if neg { + if numAbs > -minInt64 { + return minInt64 + } + return -1 * int64(numAbs) + } else { + if numAbs > +maxInt64 { + return maxInt64 + } + return +1 * int64(numAbs) + } + } + } else if t.num != 0 { + // Handle exact integer value. + switch t.str[0] { + case 'i': + return int64(t.num) + case 'u': + if t.num > maxInt64 { + return maxInt64 + } + return int64(t.num) + } + } + + // Handle JSON number that is a floating-point value. + if t.Kind() == '0' { + switch fv := t.Float(); { + case fv >= maxInt64: + return maxInt64 + case fv <= minInt64: + return minInt64 + default: + return int64(fv) // truncation toward zero + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Uint returns the unsigned integer value for a JSON number. +// The fractional component of any number is ignored (truncation toward zero). +// Any number beyond the representation of an uint64 will be saturated +// to the closest representable value. +// It panics if the token kind is not a JSON number. +func (t Token) Uint() uint64 { + // NOTE: This accessor returns 0 for any negative JSON number, + // which might be surprising, but is at least consistent with the behavior + // of saturating out-of-bounds numbers to the closest representable number. + + if raw := t.raw; raw != nil { + // Handle raw integer value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + neg := false + buf := raw.previousBuffer() + if len(buf) > 0 && buf[0] == '-' { + neg, buf = true, buf[1:] + } + if num, ok := parseDecUint(buf); ok { + if neg { + return minUint64 + } + return num + } + } else if t.num != 0 { + // Handle exact integer value. + switch t.str[0] { + case 'u': + return t.num + case 'i': + if int64(t.num) < minUint64 { + return minUint64 + } + return uint64(int64(t.num)) + } + } + + // Handle JSON number that is a floating-point value. + if t.Kind() == '0' { + switch fv := t.Float(); { + case fv >= maxUint64: + return maxUint64 + case fv <= minUint64: + return minUint64 + default: + return uint64(fv) // truncation toward zero + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Kind returns the token kind. 
+func (t Token) Kind() Kind { + switch { + case t.raw != nil: + raw := t.raw + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + return Kind(t.raw.buf[raw.prevStart]).normalize() + case t.num != 0: + return '0' + case len(t.str) != 0: + return '"' + default: + return invalidKind + } +} + +// Kind represents each possible JSON token kind with a single byte, +// which is conveniently the first byte of that kind's grammar +// with the restriction that numbers always be represented with '0': +// +// - 'n': null +// - 'f': false +// - 't': true +// - '"': string +// - '0': number +// - '{': object start +// - '}': object end +// - '[': array start +// - ']': array end +// +// An invalid kind is usually represented using 0, +// but may be non-zero due to invalid JSON data. +type Kind byte + +const invalidKind Kind = 0 + +// String prints the kind in a humanly readable fashion. +func (k Kind) String() string { + switch k { + case 'n': + return "null" + case 'f': + return "false" + case 't': + return "true" + case '"': + return "string" + case '0': + return "number" + case '{': + return "{" + case '}': + return "}" + case '[': + return "[" + case ']': + return "]" + default: + return "" + } +} + +// normalize coalesces all possible starting characters of a number as just '0'. +func (k Kind) normalize() Kind { + if k == '-' || ('0' <= k && k <= '9') { + return '0' + } + return k +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go new file mode 100644 index 000000000..e0bd1b31d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go @@ -0,0 +1,381 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "io" + "sort" + "sync" + "unicode/utf16" + "unicode/utf8" +) + +// NOTE: RawValue is analogous to v1 json.RawMessage. + +// RawValue represents a single raw JSON value, which may be one of the following: +// - a JSON literal (i.e., null, true, or false) +// - a JSON string (e.g., "hello, world!") +// - a JSON number (e.g., 123.456) +// - an entire JSON object (e.g., {"fizz":"buzz"} ) +// - an entire JSON array (e.g., [1,2,3] ) +// +// RawValue can represent entire array or object values, while Token cannot. +// RawValue may contain leading and/or trailing whitespace. +type RawValue []byte + +// Clone returns a copy of v. +func (v RawValue) Clone() RawValue { + if v == nil { + return nil + } + return append(RawValue{}, v...) +} + +// String returns the string formatting of v. +func (v RawValue) String() string { + if v == nil { + return "null" + } + return string(v) +} + +// IsValid reports whether the raw JSON value is syntactically valid +// according to RFC 7493. +// +// It verifies whether the input is properly encoded as UTF-8, +// that escape sequences within strings decode to valid Unicode codepoints, and +// that all names in each object are unique. +// It does not verify whether numbers are representable within the limits +// of any common numeric type (e.g., float64, int64, or uint64). 
+func (v RawValue) IsValid() bool { + d := getBufferedDecoder(v, DecodeOptions{}) + defer putBufferedDecoder(d) + _, errVal := d.ReadValue() + _, errEOF := d.ReadToken() + return errVal == nil && errEOF == io.EOF +} + +// Compact removes all whitespace from the raw JSON value. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already compacted, then the buffer is not mutated. +func (v *RawValue) Compact() error { + return v.reformat(false, false, "", "") +} + +// Indent reformats the whitespace in the raw JSON value so that each element +// in a JSON object or array begins on a new, indented line beginning with +// prefix followed by one or more copies of indent according to the nesting. +// The value does not begin with the prefix nor any indention, +// to make it easier to embed inside other formatted JSON data. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already indented properly, then the buffer is not mutated. +func (v *RawValue) Indent(prefix, indent string) error { + return v.reformat(false, true, prefix, indent) +} + +// Canonicalize canonicalizes the raw JSON value according to the +// JSON Canonicalization Scheme (JCS) as defined by RFC 8785 +// where it produces a stable representation of a JSON value. +// +// The output stability is dependent on the stability of the application data +// (see RFC 8785, Appendix E). It cannot produce stable output from +// fundamentally unstable input. For example, if the JSON value +// contains ephemeral data (e.g., a frequently changing timestamp), +// then the value is still unstable regardless of whether this is called. +// +// Note that JCS treats all JSON numbers as IEEE 754 double precision numbers. +// Any numbers with precision beyond what is representable by that form +// will lose their precision when canonicalized. For example, integer values +// beyond ±2⁵³ will lose their precision. It is recommended that +// int64 and uint64 data types be represented as a JSON string. +// +// It is guaranteed to succeed if the input is valid. +// If the value is already canonicalized, then the buffer is not mutated. +func (v *RawValue) Canonicalize() error { + return v.reformat(true, false, "", "") +} + +// TODO: Instead of implementing the v1 Marshaler/Unmarshaler, +// consider implementing the v2 versions instead. + +// MarshalJSON returns v as the JSON encoding of v. +// It returns the stored value as the raw JSON output without any validation. +// If v is nil, then this returns a JSON null. +func (v RawValue) MarshalJSON() ([]byte, error) { + // NOTE: This matches the behavior of v1 json.RawMessage.MarshalJSON. + if v == nil { + return []byte("null"), nil + } + return v, nil +} + +// UnmarshalJSON sets v as the JSON encoding of b. +// It stores a copy of the provided raw JSON input without any validation. +func (v *RawValue) UnmarshalJSON(b []byte) error { + // NOTE: This matches the behavior of v1 json.RawMessage.UnmarshalJSON. + if v == nil { + return errors.New("json.RawValue: UnmarshalJSON on nil pointer") + } + *v = append((*v)[:0], b...) + return nil +} + +// Kind returns the starting token kind. +// For a valid value, this will never include '}' or ']'. 
+func (v RawValue) Kind() Kind { + if v := v[consumeWhitespace(v):]; len(v) > 0 { + return Kind(v[0]).normalize() + } + return invalidKind +} + +func (v *RawValue) reformat(canonical, multiline bool, prefix, indent string) error { + var eo EncodeOptions + if canonical { + eo.AllowInvalidUTF8 = false // per RFC 8785, section 3.2.4 + eo.AllowDuplicateNames = false // per RFC 8785, section 3.1 + eo.canonicalizeNumbers = true // per RFC 8785, section 3.2.2.3 + eo.EscapeRune = nil // per RFC 8785, section 3.2.2.2 + eo.multiline = false // per RFC 8785, section 3.2.1 + } else { + if s := trimLeftSpaceTab(prefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + eo.AllowInvalidUTF8 = true + eo.AllowDuplicateNames = true + eo.preserveRawStrings = true + eo.multiline = multiline // in case indent is empty + eo.IndentPrefix = prefix + eo.Indent = indent + } + eo.omitTopLevelNewline = true + + // Write the entire value to reformat all tokens and whitespace. + e := getBufferedEncoder(eo) + defer putBufferedEncoder(e) + if err := e.WriteValue(*v); err != nil { + return err + } + + // For canonical output, we may need to reorder object members. + if canonical { + // Obtain a buffered encoder just to use its internal buffer as + // a scratch buffer in reorderObjects for reordering object members. + e2 := getBufferedEncoder(EncodeOptions{}) + defer putBufferedEncoder(e2) + + // Disable redundant checks performed earlier during encoding. + d := getBufferedDecoder(e.buf, DecodeOptions{AllowInvalidUTF8: true, AllowDuplicateNames: true}) + defer putBufferedDecoder(d) + reorderObjects(d, &e2.buf) // per RFC 8785, section 3.2.3 + } + + // Store the result back into the value if different. + if !bytes.Equal(*v, e.buf) { + *v = append((*v)[:0], e.buf...) + } + return nil +} + +func trimLeftSpaceTab(s string) string { + for i, r := range s { + switch r { + case ' ', '\t': + default: + return s[i:] + } + } + return "" +} + +type memberName struct { + // name is the unescaped name. + name []byte + // before and after are byte offsets into Decoder.buf that represents + // the entire name/value pair. It may contain leading commas. + before, after int64 +} + +var memberNamePool = sync.Pool{New: func() any { return new(memberNames) }} + +func getMemberNames() *memberNames { + ns := memberNamePool.Get().(*memberNames) + *ns = (*ns)[:0] + return ns +} +func putMemberNames(ns *memberNames) { + if cap(*ns) < 1<<10 { + for i := range *ns { + (*ns)[i] = memberName{} // avoid pinning name + } + memberNamePool.Put(ns) + } +} + +type memberNames []memberName + +func (m *memberNames) Len() int { return len(*m) } +func (m *memberNames) Less(i, j int) bool { return lessUTF16((*m)[i].name, (*m)[j].name) } +func (m *memberNames) Swap(i, j int) { (*m)[i], (*m)[j] = (*m)[j], (*m)[i] } + +// reorderObjects recursively reorders all object members in place +// according to the ordering specified in RFC 8785, section 3.2.3. +// +// Pre-conditions: +// - The value is valid (i.e., no decoder errors should ever occur). +// - The value is compact (i.e., no whitespace is present). +// - Initial call is provided a Decoder reading from the start of v. +// +// Post-conditions: +// - Exactly one JSON value is read from the Decoder. +// - All fully-parsed JSON objects are reordered by directly moving +// the members in the value buffer. 
+// +// The runtime is approximately O(n·log(n)) + O(m·log(m)), +// where n is len(v) and m is the total number of object members. +func reorderObjects(d *Decoder, scratch *[]byte) { + switch tok, _ := d.ReadToken(); tok.Kind() { + case '{': + // Iterate and collect the name and offsets for every object member. + members := getMemberNames() + defer putMemberNames(members) + var prevName []byte + isSorted := true + + beforeBody := d.InputOffset() // offset after '{' + for d.PeekKind() != '}' { + beforeName := d.InputOffset() + var flags valueFlags + name, _ := d.readValue(&flags) + name = unescapeStringMayCopy(name, flags.isVerbatim()) + reorderObjects(d, scratch) + afterValue := d.InputOffset() + + if isSorted && len(*members) > 0 { + isSorted = lessUTF16(prevName, []byte(name)) + } + *members = append(*members, memberName{name, beforeName, afterValue}) + prevName = name + } + afterBody := d.InputOffset() // offset before '}' + d.ReadToken() + + // Sort the members; return early if it's already sorted. + if isSorted { + return + } + // TODO(https://go.dev/issue/47619): Use slices.Sort. + sort.Sort(members) + + // Append the reordered members to a new buffer, + // then copy the reordered members back over the original members. + // Avoid swapping in place since each member may be a different size + // where moving a member over a smaller member may corrupt the data + // for subsequent members before they have been moved. + // + // The following invariant must hold: + // sum([m.after-m.before for m in members]) == afterBody-beforeBody + sorted := (*scratch)[:0] + for i, member := range *members { + if d.buf[member.before] == ',' { + member.before++ // trim leading comma + } + sorted = append(sorted, d.buf[member.before:member.after]...) + if i < len(*members)-1 { + sorted = append(sorted, ',') // append trailing comma + } + } + if int(afterBody-beforeBody) != len(sorted) { + panic("BUG: length invariant violated") + } + copy(d.buf[beforeBody:afterBody], sorted) + + // Update scratch buffer to the largest amount ever used. + if len(sorted) > len(*scratch) { + *scratch = sorted + } + case '[': + for d.PeekKind() != ']' { + reorderObjects(d, scratch) + } + d.ReadToken() + } +} + +// lessUTF16 reports whether x is lexicographically less than y according +// to the UTF-16 codepoints of the UTF-8 encoded input strings. +// This implements the ordering specified in RFC 8785, section 3.2.3. +// The inputs must be valid UTF-8, otherwise this may panic. +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { + // NOTE: This is an optimized, allocation-free implementation + // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the + // two implementations agree on the result of comparing any two strings. + + isUTF16Self := func(r rune) bool { + return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') + } + + var invalidUTF8 bool + x0, y0 := x, y + for { + if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } + return len(x) < len(y) + } + + // ASCII fast-path. + if x[0] < utf8.RuneSelf || y[0] < utf8.RuneSelf { + if x[0] != y[0] { + return x[0] < y[0] + } + x, y = x[1:], y[1:] + continue + } + + // Decode next pair of runes as UTF-8. + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). 
+ var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { + // The x rune is a single UTF-16 codepoint, while + // the y rune is a surrogate pair of UTF-16 codepoints. + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) + // The y rune is a single UTF-16 codepoint, while + // the x rune is a surrogate pair of UTF-16 codepoints. + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry + } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) + x, y = x[nx:], y[ny:] + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go new file mode 100644 index 000000000..61141a500 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -0,0 +1,260 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. +// - models: a map from definition name to OpenAPI V3 structural schema for each definition. +// Key in map is used to resolve references in the schema. +// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved. +// - returns: nil and an error if there is a parse error, or if schema does not satisfy a +// required structural schema invariant for conversion. If no error, returns +// a new smd schema. +// +// Schema should be validated as structural before using with this function, or +// there may be information lost. +func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + + for name, spec := range models { + // Skip/Ignore top-level references + if len(spec.Ref.String()) > 0 { + continue + } + + var a schema.Atom + + // Hard-coded schemas for now as proto_models implementation functions. 
+ // https://github.com/kubernetes/kube-openapi/issues/364 + if name == quantityResource { + a = schema.Atom{ + Scalar: untypedDef.Atom.Scalar, + } + } else if name == rawExtensionResource { + a = untypedDef.Atom + } else { + c2 := c.push(name, &a) + c2.visitSpec(spec) + c.pop(c2) + } + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) visitSpec(m *spec.Schema) { + // Check if this schema opts its descendants into preserve-unknown-fields + if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + c.preserveUnknownFields = true + } + a := c.top() + *a = c.parseSchema(m) +} + +func (c *convert) parseSchema(m *spec.Schema) schema.Atom { + // k8s-generated OpenAPI specs have historically used only one value for + // type and starting with OpenAPIV3 it is only allowed to be + // a single string. + typ := "" + if len(m.Type) > 0 { + typ = m.Type[0] + } + + // Structural Schemas produced by kubernetes follow very specific rules which + // we can use to infer the SMD type: + switch typ { + case "": + // According to Swagger docs: + // https://swagger.io/docs/specification/data-models/data-types/#any + // + // If no type is specified, it is equivalent to accepting any type. + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: c.parseList(m), + Map: c.parseObject(m), + } + + case "object": + return schema.Atom{ + Map: c.parseObject(m), + } + case "array": + return schema.Atom{ + List: c.parseList(m), + } + case "integer", "boolean", "number", "string": + return convertPrimitive(typ, m.Format) + default: + c.reportError("unrecognized type: '%v'", typ) + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + } + } +} + +func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef { + refString := specSchema.Ref.String() + + // Special-case handling for $ref stored inside a single-element allOf + if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 { + refString = specSchema.AllOf[0].Ref.String() + } + + if _, n := path.Split(refString); len(n) > 0 { + //!TODO: Refactor the field ElementRelationship override + // we can generate the types with overrides ahead of time rather than + // requiring the hacky runtime support + // (could just create a normalized key struct containing all customizations + // to deduplicate) + mapRelationship, err := getMapElementRelationship(specSchema.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + if len(mapRelationship) > 0 { + return schema.TypeRef{ + NamedType: &n, + ElementRelationship: &mapRelationship, + } + } + + return schema.TypeRef{ + NamedType: &n, + } + + } + var inlined schema.Atom + + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &inlined) + c2.preserveUnknownFields = c.preserveUnknownFields + c2.visitSpec(specSchema) + c.pop(c2) + + return schema.TypeRef{ + Inlined: inlined, + } +} + +func (c *convert) parseObject(s *spec.Schema) *schema.Map { + var fields []schema.StructField + for name, member := range s.Properties { + fields = append(fields, schema.StructField{ + Name: name, + Type: c.makeOpenAPIRef(&member), + Default: member.Default, + }) + } + + // AdditionalProperties informs the schema of any "unknown" keys + // Unknown keys are enforced by the ElementType field. 
+ elementType := func() schema.TypeRef { + if s.AdditionalProperties == nil { + // According to openAPI spec, an object without properties and without + // additionalProperties is assumed to be a free-form object. + if c.preserveUnknownFields || len(s.Properties) == 0 { + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + // If properties are specified, do not implicitly allow unknown + // fields + return schema.TypeRef{} + } else if s.AdditionalProperties.Schema != nil { + // Unknown fields use the referred schema + return c.makeOpenAPIRef(s.AdditionalProperties.Schema) + + } else if s.AdditionalProperties.Allows { + // A boolean instead of a schema was provided. Deduce the + // type from the value provided at runtime. + return schema.TypeRef{ + NamedType: &deducedName, + } + } else { + // Additional Properties are explicitly disallowed by the user. + // Ensure element type is empty. + return schema.TypeRef{} + } + }() + + relationship, err := getMapElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + return &schema.Map{ + Fields: fields, + ElementRelationship: relationship, + ElementType: elementType, + } +} + +func (c *convert) parseList(s *spec.Schema) *schema.List { + relationship, mapKeys, err := getListElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + elementType := func() schema.TypeRef { + if s.Items != nil { + if s.Items.Schema == nil || s.Items.Len() != 1 { + c.reportError("structural schema arrays must have exactly one member subtype") + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + subSchema := s.Items.Schema + if subSchema == nil { + subSchema = &s.Items.Schemas[0] + } + return c.makeOpenAPIRef(subSchema) + } else if len(s.Type) > 0 && len(s.Type[0]) > 0 { + c.reportError("`items` must be specified on arrays") + } + + // A list with no items specified is treated as "untyped". + return schema.TypeRef{ + NamedType: &untypedName, + } + + }() + + return &schema.List{ + ElementRelationship: relationship, + Keys: mapKeys, + ElementType: elementType, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go new file mode 100644 index 000000000..2c6fd76a9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + for _, name := range models.ListModels() { + model := models.LookupModel(name) + + var a schema.Atom + c2 := c.push(name, &a) + model.Accept(c2) + c.pop(c2) + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + + mapRelationship, err := getMapElementRelationship(model.GetExtensions()) + + if err != nil { + c.reportError(err.Error()) + } + + // empty string means unset. + if len(mapRelationship) > 0 { + tr.ElementRelationship = &mapRelationship + } + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } +} + +func (c *convert) VisitArray(a *proto.Array) { + relationship, mapKeys, err := getListElementRelationship(a.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + atom := c.top() + atom.List = &schema.List{ + ElementType: c.makeRef(a.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + Keys: mapKeys, + } +} + +func (c *convert) VisitMap(m *proto.Map) { + relationship, err := getMapElementRelationship(m.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + a := c.top() + a.Map = &schema.Map{ + ElementType: c.makeRef(m.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + } +} + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + *a = convertPrimitive(p.Type, p.Format) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 35241fde6..799d866d5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -17,43 +17,18 @@ limitations under the License. package schemaconv import ( - "errors" "fmt" - "path" "sort" - "strings" - "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/schema" ) const ( - quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension" ) -// ToSchema converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2). -func ToSchema(models proto.Models) (*schema.Schema, error) { - return ToSchemaWithPreserveUnknownFields(models, false) -} - -// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. -func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { - c := convert{ - input: models, - preserveUnknownFields: preserveUnknownFields, - output: &schema.Schema{}, - } - if err := c.convertAll(); err != nil { - return nil, err - } - c.addCommonTypes() - return c.output, nil -} - type convert struct { - input proto.Models preserveUnknownFields bool output *schema.Schema @@ -64,7 +39,6 @@ type convert struct { func (c *convert) push(name string, a *schema.Atom) *convert { return &convert{ - input: c.input, preserveUnknownFields: c.preserveUnknownFields, output: c.output, currentName: name, @@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) { c.errorMessages = append(c.errorMessages, c2.errorMessages...) 
} -func (c *convert) convertAll() error { - for _, name := range c.input.ListModels() { - model := c.input.LookupModel(name) - c.insertTypeDef(name, model) - } - if len(c.errorMessages) > 0 { - return errors.New(strings.Join(c.errorMessages, "\n")) - } - return nil -} - func (c *convert) reportError(format string, args ...interface{}) { c.errorMessages = append(c.errorMessages, c.currentName+": "+fmt.Sprintf(format, args...), ) } -func (c *convert) insertTypeDef(name string, model proto.Schema) { +func (c *convert) insertTypeDef(name string, atom schema.Atom) { def := schema.TypeDef{ Name: name, + Atom: atom, } - c2 := c.push(name, &def.Atom) - model.Accept(c2) - c.pop(c2) if def.Atom == (schema.Atom{}) { // This could happen if there were a top-level reference. return @@ -156,32 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{ }, } -func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { - var tr schema.TypeRef - if r, ok := model.(*proto.Ref); ok { - if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { - return schema.TypeRef{ - NamedType: &untypedName, - } - } - // reference a named type - _, n := path.Split(r.Reference()) - tr.NamedType = &n - } else { - // compute the type inline - c2 := c.push("inlined in "+c.currentName, &tr.Inlined) - c2.preserveUnknownFields = preserveUnknownFields - model.Accept(c2) - c.pop(c2) - - if tr == (schema.TypeRef{}) { - // emit warning? - tr.NamedType = &untypedName - } - } - return tr -} - func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { schemaUnions := []schema.Union{} if iunions, ok := extensions["x-kubernetes-unions"]; ok { @@ -285,52 +220,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { return union, nil } -func (c *convert) VisitKind(k *proto.Kind) { - preserveUnknownFields := c.preserveUnknownFields - if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { - preserveUnknownFields = true - } - - a := c.top() - a.Map = &schema.Map{} - for _, name := range k.FieldOrder { - member := k.Fields[name] - tr := c.makeRef(member, preserveUnknownFields) - a.Map.Fields = append(a.Map.Fields, schema.StructField{ - Name: name, - Type: tr, - Default: member.GetDefault(), - }) - } - - unions, err := makeUnions(k.GetExtensions()) - if err != nil { - c.reportError(err.Error()) - return - } - // TODO: We should check that the fields and discriminator - // specified in the union are actual fields in the struct. 
- a.Map.Unions = unions - - if preserveUnknownFields { - a.Map.ElementType = schema.TypeRef{ - NamedType: &deducedName, - } - } - - ext := k.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable - default: - c.reportError("unknown map type %v", val) - } - } -} - func toStringSlice(o interface{}) (out []string, ok bool) { switch t := o.(type) { case []interface{}: @@ -341,117 +230,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) { } } return out, true + case []string: + return t, true } return nil, false } -func (c *convert) VisitArray(a *proto.Array) { - atom := c.top() - atom.List = &schema.List{ - ElementRelationship: schema.Atomic, - } - l := atom.List - l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) - - ext := a.GetExtensions() +func ptr(s schema.Scalar) *schema.Scalar { return &s } - if val, ok := ext["x-kubernetes-list-type"]; ok { - if val == "atomic" { - l.ElementRelationship = schema.Atomic - } else if val == "set" { - l.ElementRelationship = schema.Associative - } else if val == "map" { - l.ElementRelationship = schema.Associative - if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { - if keyNames, ok := toStringSlice(keys); ok { - l.Keys = keyNames - } else { - c.reportError("uninterpreted map keys: %#v", keys) - } - } else { - c.reportError("missing map keys") - } - } else { - c.reportError("unknown list type %v", val) - l.ElementRelationship = schema.Atomic - } - } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { - if val == "merge" || val == "merge,retainKeys" { - l.ElementRelationship = schema.Associative - if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { - if keyName, ok := key.(string); ok { - l.Keys = []string{keyName} - } else { - c.reportError("uninterpreted merge key: %#v", key) - } - } else { - // It's not an error for this to be absent, it - // means it's a set. - } - } else if val == "retainKeys" { - } else { - c.reportError("unknown patch strategy %v", val) - l.ElementRelationship = schema.Atomic +// Basic conversion functions to convert OpenAPI schema definitions to +// SMD Schema atoms +func convertPrimitive(typ string, format string) (a schema.Atom) { + switch typ { + case "integer": + a.Scalar = ptr(schema.Numeric) + case "number": + a.Scalar = ptr(schema.Numeric) + case "string": + switch format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } + case "boolean": + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } -} -func (c *convert) VisitMap(m *proto.Map) { - a := c.top() - a.Map = &schema.Map{} - a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + return a +} - ext := m.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { +func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) { + if val, ok := ext["x-kubernetes-list-type"]; ok { switch val { case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable + return schema.Atomic, nil, nil + case "set": + return schema.Associative, nil, nil + case "map": + keys, ok := ext["x-kubernetes-list-map-keys"] + + if !ok { + return schema.Associative, nil, fmt.Errorf("missing map keys") + } + + keyNames, ok := toStringSlice(keys) + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys) + } + + return schema.Associative, keyNames, nil default: - c.reportError("unknown map type %v", val) + return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val) } - } -} + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + switch val { + case "merge", "merge,retainKeys": + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + keyName, ok := key.(string) -func ptr(s schema.Scalar) *schema.Scalar { return &s } + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key) + } -func (c *convert) VisitPrimitive(p *proto.Primitive) { - a := c.top() - if c.currentName == quantityResource { - a.Scalar = ptr(schema.Scalar("untyped")) - } else { - switch p.Type { - case proto.Integer: - a.Scalar = ptr(schema.Numeric) - case proto.Number: - a.Scalar = ptr(schema.Numeric) - case proto.String: - switch p.Format { - case "": - a.Scalar = ptr(schema.String) - case "byte": - // byte really means []byte and is encoded as a string. 
- a.Scalar = ptr(schema.String) - case "int-or-string": - a.Scalar = ptr(schema.Scalar("untyped")) - case "date-time": - a.Scalar = ptr(schema.Scalar("untyped")) - default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Associative, []string{keyName}, nil } - case proto.Boolean: - a.Scalar = ptr(schema.Boolean) + // It's not an error for x-kubernetes-patch-merge-key to be absent, + // it means it's a set + return schema.Associative, nil, nil + case "retainKeys": + return schema.Atomic, nil, nil default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val) } } -} -func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = deducedDef.Atom + // Treat as atomic by default + return schema.Atomic, nil, nil } -func (c *convert) VisitReference(proto.Reference) { - // Do nothing, we handle references specially +// Returns map element relationship if specified, or empty string if unspecified +func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) { + val, ok := ext["x-kubernetes-map-type"] + if !ok { + // unset Map element relationship + return "", nil + } + + switch val { + case "atomic": + return schema.Atomic, nil + case "granular": + return schema.Separable, nil + default: + return "", fmt.Errorf("unknown map type %v", val) + } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 51dac4bdf..699291f1d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -41,6 +44,9 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { } func (e *Encoding) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.EncodingProps); err != nil { return err } @@ -50,6 +56,20 @@ func (e *Encoding) UnmarshalJSON(data []byte) error { return nil } +func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + EncodingProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.EncodingProps = x.EncodingProps + return nil +} + type EncodingProps struct { // Content Type for encoding a specific property ContentType string `json:"contentType,omitempty"` @@ -58,7 +78,7 @@ type EncodingProps struct { // Describes how a specific property value will be serialized depending on its type Style string `json:"style,omitempty"` // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. 
For other types of properties this property has no effect - Explode string `json:"explode,omitempty"` + Explode bool `json:"explode,omitempty"` // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 0f5ab983c..03b872717 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -19,8 +19,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + + "k8s.io/kube-openapi/pkg/validation/spec" ) // Example https://swagger.io/specification/#example-object @@ -49,6 +52,9 @@ func (e *Example) MarshalJSON() ([]byte, error) { } func (e *Example) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.Refable); err != nil { return err } @@ -61,6 +67,23 @@ func (e *Example) UnmarshalJSON(data []byte) error { return nil } +func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExampleProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExampleProps = x.ExampleProps + + return nil +} + type ExampleProps struct { // Summary holds a short description of the example Summary string `json:"summary,omitempty"` diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 117113e7a..e79956721 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,8 +18,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type ExternalDocumentation struct { @@ -48,6 +51,9 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { } func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { return err } @@ -56,3 +62,16 @@ func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { } return nil } + +func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExternalDocumentationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExternalDocumentationProps = x.ExternalDocumentationProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go new file mode 100644 index 000000000..bc19dd48e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -0,0 +1,254 @@ +package spec3 + +import ( 
+ "math/rand" + "strings" + + fuzz "github.com/google/gofuzz" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// refChance is the chance that a particular component will use a $ref +// instead of fuzzed. Expressed as a fraction 1/n, currently there is +// a 1/3 chance that a ref will be used. +const refChance = 3 + +const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randAlphanumString() string { + arr := make([]string, rand.Intn(10)+5) + for i := 0; i < len(arr); i++ { + arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))]) + } + return strings.Join(arr, "") +} + +var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ + func(s *string, c fuzz.Continue) { + // All OpenAPI V3 map keys must follow the corresponding + // regex. Note that this restricts the range for all other + // string values as well. + str := randAlphanumString() + *s = str + }, + func(o *OpenAPI, c fuzz.Continue) { + c.FuzzNoCustom(o) + o.Version = "3.0.0" + }, + func(r *interface{}, c fuzz.Continue) { + switch c.Intn(3) { + case 0: + *r = nil + case 1: + n := c.RandString() + "x" + *r = n + case 2: + n := c.Float64() + *r = n + } + }, + func(v **spec.Info, c fuzz.Continue) { + // Info is never nil + *v = &spec.Info{} + c.FuzzNoCustom(*v) + (*v).Title = c.RandString() + "x" + }, + func(v *Paths, c fuzz.Continue) { + c.Fuzz(&v.VendorExtensible) + num := c.Intn(5) + if num > 0 { + v.Paths = make(map[string]*Path) + } + for i := 0; i < num; i++ { + val := Path{} + c.Fuzz(&val) + v.Paths["/"+c.RandString()] = &val + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + switch c.Intn(4) { + case 0: + v.Type = "apiKey" + v.Name = c.RandString() + "x" + switch c.Intn(3) { + case 0: + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + case 1: + v.Type = "http" + case 2: + v.Type = "oauth2" + v.Flows = make(map[string]*OAuthFlow) + flow := OAuthFlow{} + flow.AuthorizationUrl = c.RandString() + "x" + v.Flows["implicit"] = &flow + flow.Scopes = make(map[string]string) + flow.Scopes["foo"] = "bar" + case 3: + v.Type = "openIdConnect" + v.OpenIdConnectUrl = "https://" + c.RandString() + } + v.Scheme = "basic" + }, + func(v *spec.Ref, c fuzz.Continue) { + switch c.Intn(7) { + case 0: + *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) + case 1: + *v = spec.MustCreateRef("#/components/responses/" + randAlphanumString()) + case 2: + *v = spec.MustCreateRef("#/components/headers/" + randAlphanumString()) + case 3: + *v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString()) + case 5: + *v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString()) + case 6: + *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) + } + }, + func(v *Parameter, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ParameterProps) + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + // Header param + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + }, + func(v *RequestBody, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.RequestBodyProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Header, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.HeaderProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *ResponsesProps, c fuzz.Continue) { + c.Fuzz(&v.Default) + n := c.Intn(5) + 
for i := 0; i < n; i++ { + r2 := Response{} + c.Fuzz(&r2) + // HTTP Status code in 100-599 Range + code := c.Intn(500) + 100 + v.StatusCodeResponses = make(map[int]*Response) + v.StatusCodeResponses[code] = &r2 + } + }, + func(v *Response, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ResponseProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *spec.Extensions, c fuzz.Continue) { + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = spec.Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *spec.ExternalDocumentation, c fuzz.Continue) { + c.Fuzz(&v.Description) + v.URL = "https://" + randAlphanumString() + }, + func(v *spec.SchemaURL, c fuzz.Continue) { + *v = spec.SchemaURL("https://" + randAlphanumString()) + }, + func(v *spec.SchemaOrBool, c fuzz.Continue) { + *v = spec.SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &spec.Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v *spec.SchemaOrArray, c fuzz.Continue) { + *v = spec.SchemaOrArray{} + if c.RandBool() { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schema = &schema + } else { + v.Schemas = []spec.Schema{} + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schemas = append(v.Schemas, schema) + } + + } + + }, + func(v *spec.SchemaOrStringArray, c fuzz.Continue) { + if c.RandBool() { + *v = spec.SchemaOrStringArray{} + if c.RandBool() { + c.Fuzz(&v.Property) + } else { + c.Fuzz(&v.Schema) + } + } + }, + func(v *spec.Schema, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Ref) + return + } + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = spec.StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.ExtraProps) + } + + }, +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index cead4b15d..ee5a30f79 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,9 @@ func (h *Header) MarshalJSON() ([]byte, error) { } func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, h) + } if err := json.Unmarshal(data, &h.Refable); err != nil { return err } @@ -63,6 +68,22 @@ func (h *Header) UnmarshalJSON(data []byte) error { return nil } +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + HeaderProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil { + return err + } + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + return nil +} + // HeaderProps a struct that describes a header object type HeaderProps struct { // Description holds a brief description of the parameter diff --git 
a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 828fd8dc5..d390e69bc 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -44,6 +47,9 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { } func (m *MediaType) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, m) + } if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { return err } @@ -53,10 +59,24 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { return nil } +func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + MediaTypeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + m.Extensions = internal.SanitizeExtensions(x.Extensions) + m.MediaTypeProps = x.MediaTypeProps + + return nil +} + // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { // Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` + Schema *spec.Schema `json:"schema,omitempty"` // Example of the media type Example interface{} `json:"example,omitempty"` // Examples of the media type. Each example object should match the media type and specific schema if present diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index de8aa4602..28230610b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject @@ -46,12 +48,28 @@ func (o *Operation) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, o) + } if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + OperationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = x.OperationProps + return nil +} + // OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject type OperationProps struct { // Tags holds a list of tags for API documentation control @@ -73,7 +91,7 @@ type OperationProps struct { // 
Deprecated declares this operation to be deprecated Deprecated bool `json:"deprecated,omitempty"` // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation - SecurityRequirement []*SecurityRequirement `json:"security,omitempty"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 0d7180e50..613da71a6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,10 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { } func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -63,6 +69,22 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return nil } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ParameterProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParameterProps = x.ParameterProps + return nil +} + // ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject type ParameterProps struct { // Name holds the name of the parameter diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index bc48c504d..40d9061ac 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strings" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject @@ -45,6 +48,9 @@ func (p *Paths) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -74,6 +80,59 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *p = Paths{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k 
:= tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi := Path{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + p.Paths[k] = &pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject // // Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible @@ -101,6 +160,9 @@ func (p *Path) MarshalJSON() ([]byte, error) { } func (p *Path) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -113,6 +175,24 @@ func (p *Path) UnmarshalJSON(data []byte) error { return nil } +func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + PathProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathProps = x.PathProps + + return nil +} + // PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject type PathProps struct { // Summary holds a summary for all operations in this path diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 0adc62826..33267ce67 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject @@ -50,6 +52,9 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { } func (r *RequestBody) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -71,3 +76,19 @@ type RequestBodyProps struct { // Required determines if the request body is required in the request Required bool `json:"required,omitempty"` } + +func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + RequestBodyProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.RequestBodyProps = x.RequestBodyProps + return nil +} diff --git 
a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index ccd73369f..95b388e6c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strconv" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Responses holds the list of possible responses as they are returned from executing this operation @@ -46,13 +49,15 @@ func (r *Responses) MarshalJSON() ([]byte, error) { } func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } - return nil } @@ -78,25 +83,91 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]*Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value } } return nil } +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *r = Responses{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + r.ResponsesProps.Default = &resp + default: + if nk, err := strconv.Atoi(k); err == nil { + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = &resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject // // Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible @@ -124,6 +195,9 @@ func (r *Response) MarshalJSON() ([]byte, error) { } func (r *Response) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -133,7 +207,22 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } + return nil +} +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ResponseProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } @@ -149,7 +238,6 @@ type ResponseProps struct { Links map[string]*Link `json:"links,omitempty"` } - // Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object type Link struct { spec.Refable @@ -175,6 +263,9 @@ func (r *Link) MarshalJSON() ([]byte, error) { } func (r *Link) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -188,6 +279,22 @@ func (r *Link) UnmarshalJSON(data []byte) error { return nil } +func (l *Link) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + LinkProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil { + return err + } + l.Extensions = internal.SanitizeExtensions(x.Extensions) + l.LinkProps = x.LinkProps + return nil +} + // LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject type LinkProps struct { // OperationId is the name of an existing, resolvable OAS operation diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go deleted file mode 100644 index 0ce8924ef..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -// -// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible -type SecurityRequirement struct { - SecurityRequirementProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON -func (s *SecurityRequirement) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecurityRequirementProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityRequirement) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -type SecurityRequirementProps map[string][]string diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index 9b1352f4e..edf7e6de3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index a505fb221..d5df0a781 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,9 +18,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type Server struct { @@ -51,6 +53,10 @@ func (s *Server) MarshalJSON() ([]byte, error) { } func (s *Server) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + if err := json.Unmarshal(data, &s.ServerProps); err != nil { return err } @@ -60,6 +66,20 @@ func (s *Server) UnmarshalJSON(data []byte) error { return nil } +func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerProps = x.ServerProps + + return nil +} + type ServerVariable struct { ServerVariableProps spec.VendorExtensible @@ -88,6 +108,9 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { } func (s *ServerVariable) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { return err } @@ -96,3 +119,17 @@ func (s *ServerVariable) UnmarshalJSON(data []byte) error { } return nil } + +func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerVariableProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerVariableProps = x.ServerVariableProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index 3ff48a3c3..bed096fb7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -17,6 +17,10 @@ limitations under the License. package spec3 import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -35,3 +39,12 @@ type OpenAPI struct { // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } + +func (o *OpenAPI) UnmarshalJSON(data []byte) error { + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, &p) + } + return json.Unmarshal(data, &p) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 763923dff..5789e67ab 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "gopkg.in/yaml.v2" ) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index a3f476d5d..d9f2896e3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -21,7 +21,7 @@ import ( "reflect" "strings" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" "gopkg.in/yaml.v3" ) @@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e switch s.GetType() { case object: for _, extension := range s.GetSpecificationExtension() { - if extension.Name == "x-kuberentes-group-version-kind" { + if extension.Name == "x-kubernetes-group-version-kind" { // Objects with x-kubernetes-group-version-kind are always top // level types. 
return d.parseV3Kind(s, path) @@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) { func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { if s == nil { - return nil, fmt.Errorf("cannot initializae BaseSchema from nil") + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") } def, err := parseV3Interface(s.GetDefault().ToRawInfo()) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go new file mode 100644 index 000000000..c66f998f5 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go @@ -0,0 +1,502 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec + +import ( + "github.com/go-openapi/jsonreference" + "github.com/google/go-cmp/cmp" + fuzz "github.com/google/gofuzz" +) + +var SwaggerFuzzFuncs []interface{} = []interface{}{ + func(v *Responses, c fuzz.Continue) { + c.FuzzNoCustom(v) + if v.Default != nil { + // Check if we hit maxDepth and left an incomplete value + if v.Default.Description == "" { + v.Default = nil + v.StatusCodeResponses = nil + } + } + + // conversion has no way to discern empty statusCodeResponses from + // nil, since "default" is always included in the map. + // So avoid empty responses list + if len(v.StatusCodeResponses) == 0 { + v.StatusCodeResponses = nil + } + }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v != nil { + // force non-nil + v.Responses = &Responses{} + c.Fuzz(v.Responses) + + v.Schemes = nil + if c.RandBool() { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. 
+ for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + } + }, + func(v map[int]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Prevent negative numbers + num := c.Intn(4) + for i := 0; i < num+2; i++ { + val := Response{} + c.Fuzz(&val) + + val.Description = c.RandString() + "x" + v[100*(i+1)+c.Intn(100)] = val + } + }, + func(v map[string]PathItem, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + num := c.Intn(5) + for i := 0; i < num+2; i++ { + val := PathItem{} + c.Fuzz(&val) + + // Ref params are only allowed in certain locations, so + // possibly add a few to PathItems + numRefsToAdd := c.Intn(5) + for i := 0; i < numRefsToAdd; i++ { + theRef := Parameter{} + c.Fuzz(&theRef.Refable) + + val.Parameters = append(val.Parameters, theRef) + } + + v["/"+c.RandString()] = val + } + }, + func(v *SchemaOrArray, c fuzz.Continue) { + *v = SchemaOrArray{} + // gnostic parser just doesn't support more + // than one Schema here + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + }, + func(v *SchemaOrBool, c fuzz.Continue) { + *v = SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v map[string]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Response definitions are not allowed to + // be refs + for i := 0; i < c.Intn(5)+1; i++ { + resp := &Response{} + + c.Fuzz(resp) + resp.Ref = Ref{} + resp.Description = c.RandString() + "x" + + // Response refs are not vendor extensible by gnostic + resp.VendorExtensible.Extensions = nil + v[c.RandString()+"x"] = *resp + } + }, + func(v *Header, c fuzz.Continue) { + if v != nil { + c.FuzzNoCustom(v) + + // descendant Items of Header may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Ref, c fuzz.Continue) { + *v = Ref{} + v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) + }, + func(v *Response, c fuzz.Continue) { + *v = Response{} + if c.RandBool() { + v.Ref = Ref{} + v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) + } else { + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.Schema) + c.Fuzz(&v.ResponseProps) + + v.Headers = nil + v.Ref = Ref{} + + n := 0 + c.Fuzz(&n) + if n != 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + num := c.Intn(4) + for i := 0; i < num; i++ { + if v.Headers == nil { + v.Headers = make(map[string]Header) + } + hdr := Header{} + c.Fuzz(&hdr) + if hdr.Type == "" { + // hit maxDepth, just abort trying to make haders + v.Headers = nil + break + } + v.Headers[c.RandString()+"x"] = hdr + } + } else { + v.Headers = nil + } + } + + v.Description = c.RandString() + "x" + + // Gnostic parses empty as nil, so to keep avoid putting empty + if len(v.Headers) == 0 { + v.Headers = nil + } + }, + func(v **Info, c fuzz.Continue) { + // Info is never nil + *v = &Info{} + c.FuzzNoCustom(*v) + + (*v).Title = c.RandString() + "x" + }, + func(v *Extensions, c fuzz.Continue) { + // gnostic parser only picks up x- vendor extensions + numChildren := c.Intn(5) 
+ for i := 0; i < numChildren; i++ { + if *v == nil { + *v = Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *Swagger, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v.Paths == nil { + // Force paths non-nil since it does not have omitempty in json tag. + // This means a perfect roundtrip (via json) is impossible, + // since we can't tell the difference between empty/unspecified paths + v.Paths = &Paths{} + c.Fuzz(v.Paths) + } + + v.Swagger = "2.0" + + // Gnostic support serializing ID at all + // unavoidable data loss + v.ID = "" + + v.Schemes = nil + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. + for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + v.Description = c.RandString() + "x" + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + v.Type = "basic" + case 1: + v.Type = "apiKey" + switch c.Intn(2) { + case 0: + v.In = "header" + case 1: + v.In = "query" + default: + panic("unreachable") + } + v.Name = "x" + c.RandString() + case 2: + v.Type = "oauth2" + + switch c.Intn(4) { + case 0: + v.Flow = "accessCode" + v.TokenURL = "https://" + c.RandString() + v.AuthorizationURL = "https://" + c.RandString() + case 1: + v.Flow = "application" + v.TokenURL = "https://" + c.RandString() + case 2: + v.Flow = "implicit" + v.AuthorizationURL = "https://" + c.RandString() + case 3: + v.Flow = "password" + v.TokenURL = "https://" + c.RandString() + default: + panic("unreachable") + } + c.Fuzz(&v.Scopes) + default: + panic("unreachable") + } + }, + func(v *interface{}, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *string, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *ExternalDocumentation, c fuzz.Continue) { + v.Description = c.RandString() + "x" + v.URL = c.RandString() + "x" + }, + func(v *SimpleSchema, c fuzz.Continue) { + c.FuzzNoCustom(v) + + switch c.Intn(5) { + case 0: + v.Type = "string" + case 1: + v.Type = "number" + case 2: + v.Type = "boolean" + case 3: + v.Type = "integer" + case 4: + v.Type = "array" + default: + panic("unreachable") + } + + switch c.Intn(5) { + case 0: + v.CollectionFormat = "csv" + case 1: + v.CollectionFormat = "ssv" + case 2: + v.CollectionFormat = "tsv" + case 3: + v.CollectionFormat = "pipes" + case 4: + v.CollectionFormat = "" + default: + panic("unreachable") + } + + // None of the types which include SimpleSchema in our definitions + // actually support "example" in the official spec + v.Example = nil + + // unsupported by openapi + v.Nullable = false + }, + func(v *int64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0 { + *v = 1 + } + }, + func(v *float64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0.0 { + *v = 1.0 + } + }, + func(v *Parameter, c fuzz.Continue) { + if v == nil { + return + } + c.Fuzz(&v.VendorExtensible) + if c.RandBool() { + // body param + 
v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + v.In = "body" + c.Fuzz(&v.Description) + c.Fuzz(&v.Required) + + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + } else { + c.Fuzz(&v.SimpleSchema) + c.Fuzz(&v.CommonValidations) + v.AllowEmptyValue = false + v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + + switch c.Intn(4) { + case 0: + // Header param + v.In = "header" + case 1: + // Form data param + v.In = "formData" + v.AllowEmptyValue = c.RandBool() + case 2: + // Query param + v.In = "query" + v.AllowEmptyValue = c.RandBool() + case 3: + // Path param + v.In = "path" + v.Required = true + default: + panic("unreachable") + } + + // descendant Items of Parameter may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Schema, c fuzz.Continue) { + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + // c.Fuzz(&v.ExtraProps) + // ExtraProps will not roundtrip - gnostic throws out + // unrecognized keys + } + + // Not supported by official openapi v2 spec + // and stripped by k8s apiserver + v.ID = "" + v.AnyOf = nil + v.OneOf = nil + v.Not = nil + v.Nullable = false + v.AdditionalItems = nil + v.Schema = "" + v.PatternProperties = nil + v.Definitions = nil + v.Dependencies = nil + }, +} + +var SwaggerDiffOptions = []cmp.Option{ + // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields + cmp.Comparer(func(a Ref, b Ref) bool { + return a.String() == b.String() + }), +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go new file mode 100644 index 000000000..6a77f2ac8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go @@ -0,0 +1,1517 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec + +import ( + "errors" + "strconv" + + "github.com/go-openapi/jsonreference" + openapi_v2 "github.com/google/gnostic-models/openapiv2" +) + +// Interfaces +type GnosticCommonValidations interface { + GetMaximum() float64 + GetExclusiveMaximum() bool + GetMinimum() float64 + GetExclusiveMinimum() bool + GetMaxLength() int64 + GetMinLength() int64 + GetPattern() string + GetMaxItems() int64 + GetMinItems() int64 + GetUniqueItems() bool + GetMultipleOf() float64 + GetEnum() []*openapi_v2.Any +} + +func (k *CommonValidations) FromGnostic(g GnosticCommonValidations) error { + if g == nil { + return nil + } + + max := g.GetMaximum() + if max != 0 { + k.Maximum = &max + } + + k.ExclusiveMaximum = g.GetExclusiveMaximum() + + min := g.GetMinimum() + if min != 0 { + k.Minimum = &min + } + + k.ExclusiveMinimum = g.GetExclusiveMinimum() + + maxLen := g.GetMaxLength() + if maxLen != 0 { + k.MaxLength = &maxLen + } + + minLen := g.GetMinLength() + if minLen != 0 { + k.MinLength = &minLen + } + + k.Pattern = g.GetPattern() + + maxItems := g.GetMaxItems() + if maxItems != 0 { + k.MaxItems = &maxItems + } + + minItems := g.GetMinItems() + if minItems != 0 { + k.MinItems = &minItems + } + + k.UniqueItems = g.GetUniqueItems() + + multOf := g.GetMultipleOf() + if multOf != 0 { + k.MultipleOf = &multOf + } + + enums := g.GetEnum() + + if enums != nil { + k.Enum = make([]interface{}, len(enums)) + for i, v := range enums { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Enum[i] = convert + } + } + } + + return nil +} + +type GnosticSimpleSchema interface { + GetType() string + GetFormat() string + GetItems() *openapi_v2.PrimitivesItems + GetCollectionFormat() string + GetDefault() *openapi_v2.Any +} + +func (k *SimpleSchema) FromGnostic(g GnosticSimpleSchema) error { + if g == nil { + return nil + } + + k.Type = g.GetType() + k.Format = g.GetFormat() + k.CollectionFormat = g.GetCollectionFormat() + + items := g.GetItems() + if items != nil { + k.Items = &Items{} + if err := k.Items.FromGnostic(items); err != nil { + return err + } + } + + def := g.GetDefault() + if def != nil { + var convert interface{} + if err := def.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Default = convert + } + } + + return nil +} + +func (k *Items) FromGnostic(g *openapi_v2.PrimitivesItems) error { + if g == nil { + return nil + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + return nil +} + +func (k *VendorExtensible) FromGnostic(g []*openapi_v2.NamedAny) error { + if len(g) == 0 { + return nil + } + + k.Extensions = make(Extensions, len(g)) + for _, v := range g { + if v == nil { + continue + } + + if v.Value == nil { + k.Extensions[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return err + } else { + k.Extensions[v.Name] = iface + } + } + return nil +} + +func (k *Refable) FromGnostic(g string) error { + return k.Ref.FromGnostic(g) +} + +func (k *Ref) FromGnostic(g string) error { + if g == "" { + return nil + } + + ref, err := jsonreference.New(g) + if err != nil { + return err + } + + *k = Ref{ + Ref: ref, + } + + return nil +} + +// Converts a gnostic v2 Document to a kube-openapi Swagger Document +// +// Caveats: 
+// +// - gnostic v2 documents treats zero as unspecified for numerical fields of +// CommonValidations fields such as Maximum, Minimum, MaximumItems, etc. +// There will always be data loss if one of the values of these fields is set to zero. +// +// Returns: +// +// - `ok`: `false` if a value was present in the gnostic document which cannot be +// roundtripped into kube-openapi types. In these instances, `ok` is set to +// `false` and the value is skipped. +// +// - `err`: an unexpected error occurred in the conversion from the gnostic type +// to kube-openapi type. +func (k *Swagger) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.SwaggerProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *SwaggerProps) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + // openapi_v2.Document does not support "ID" field, so it will not be + // included + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Swagger = g.Swagger + + if g.Info != nil { + k.Info = &Info{} + if nok, err := k.Info.FromGnostic(g.Info); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Host = g.Host + k.BasePath = g.BasePath + + if g.Paths != nil { + k.Paths = &Paths{} + if nok, err := k.Paths.FromGnostic(g.Paths); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Definitions != nil { + k.Definitions = make(Definitions, len(g.Definitions.AdditionalProperties)) + for _, v := range g.Definitions.AdditionalProperties { + if v == nil { + continue + } + converted := Schema{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Definitions[v.Name] = converted + + } + } + + if g.Parameters != nil { + k.Parameters = make( + map[string]Parameter, + len(g.Parameters.AdditionalProperties)) + for _, v := range g.Parameters.AdditionalProperties { + if v == nil { + continue + } + p := Parameter{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Parameters[v.Name] = p + } + } + + if g.Responses != nil { + k.Responses = make( + map[string]Response, + len(g.Responses.AdditionalProperties)) + + for _, v := range g.Responses.AdditionalProperties { + if v == nil { + continue + } + p := Response{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Responses[v.Name] = p + } + } + + if g.SecurityDefinitions != nil { + k.SecurityDefinitions = make(SecurityDefinitions) + if err := k.SecurityDefinitions.FromGnostic(g.SecurityDefinitions); err != nil { + return false, err + } + } + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Tags != nil { + k.Tags = make([]Tag, len(g.Tags)) + for i, v := range g.Tags { + if v == nil { + continue 
+ } else if nok, err := k.Tags[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Info + +func (k *Info) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.InfoProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *InfoProps) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Title = g.Title + k.TermsOfService = g.TermsOfService + + if g.Contact != nil { + k.Contact = &ContactInfo{} + + if nok, err := k.Contact.FromGnostic(g.Contact); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.License != nil { + k.License = &License{} + if nok, err := k.License.FromGnostic(g.License); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Version = g.Version + return ok, nil +} + +func (k *License) FromGnostic(g *openapi_v2.License) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + + // License does not embed to VendorExtensible! + // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +func (k *ContactInfo) FromGnostic(g *openapi_v2.Contact) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + k.Email = g.Email + + // ContactInfo does not embed to VendorExtensible! 
+ // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +// Paths + +func (k *Paths) FromGnostic(g *openapi_v2.Paths) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if g.Path != nil { + k.Paths = make(map[string]PathItem, len(g.Path)) + for _, v := range g.Path { + if v == nil { + continue + } + + converted := PathItem{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Paths[v.Name] = converted + } + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItem) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if nok, err := k.PathItemProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.Refable.FromGnostic(g.XRef); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItemProps) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if g.Get != nil { + k.Get = &Operation{} + if nok, err := k.Get.FromGnostic(g.Get); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Put != nil { + k.Put = &Operation{} + if nok, err := k.Put.FromGnostic(g.Put); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Post != nil { + k.Post = &Operation{} + if nok, err := k.Post.FromGnostic(g.Post); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Delete != nil { + k.Delete = &Operation{} + if nok, err := k.Delete.FromGnostic(g.Delete); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Options != nil { + k.Options = &Operation{} + if nok, err := k.Options.FromGnostic(g.Options); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Head != nil { + k.Head = &Operation{} + if nok, err := k.Head.FromGnostic(g.Head); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Patch != nil { + k.Patch = &Operation{} + if nok, err := k.Patch.FromGnostic(g.Patch); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Operation) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.OperationProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *OperationProps) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Tags = g.Tags + k.Summary = g.Summary + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := 
k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.ID = g.OperationId + k.Deprecated = g.Deprecated + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.Responses != nil { + k.Responses = &Responses{} + if nok, err := k.Responses.FromGnostic(g.Responses); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Responses + +func (k *Responses) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponsesProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponsesProps) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } else if g.ResponseCode == nil { + return ok, nil + } + + ok = true + for _, v := range g.ResponseCode { + if v == nil { + continue + } + if v.Name == "default" { + k.Default = &Response{} + if nok, err := k.Default.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + } else if nk, err := strconv.Atoi(v.Name); err != nil { + // This should actually never fail, unless gnostic struct was + // manually/purposefully tampered with at runtime. 
+ // Gnostic's ParseDocument validates that all StatusCodeResponses + // keys adhere to the following regex ^([0-9]{3})$|^(default)$ + ok = false + } else { + if k.StatusCodeResponses == nil { + k.StatusCodeResponses = map[int]Response{} + } + + res := Response{} + if nok, err := res.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.StatusCodeResponses[nk] = res + } + } + + return ok, nil +} + +func (k *Response) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + // Refable case handled in FromGnosticResponseValue + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponseProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *Response) FromGnosticResponseValue(g *openapi_v2.ResponseValue) (ok bool, err error) { + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetResponse()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponseProps) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + + if g.Schema != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnosticSchemaItem(g.Schema); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Headers != nil { + k.Headers = make(map[string]Header, len(g.Headers.AdditionalProperties)) + for _, v := range g.Headers.AdditionalProperties { + if v == nil { + continue + } + + converted := Header{} + if err := converted.FromGnostic(v.GetValue()); err != nil { + return false, err + } + + k.Headers[v.Name] = converted + } + } + + if g.Examples != nil { + k.Examples = make(map[string]interface{}, len(g.Examples.AdditionalProperties)) + for _, v := range g.Examples.AdditionalProperties { + if v == nil { + continue + } else if v.Value == nil { + k.Examples[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return false, err + } else { + k.Examples[v.Name] = iface + } + } + } + + return ok, nil +} + +// Header + +func (k *Header) FromGnostic(g *openapi_v2.Header) (err error) { + if g == nil { + return nil + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.HeaderProps.FromGnostic(g); err != nil { + return err + } + + return nil +} + +func (k *HeaderProps) FromGnostic(g *openapi_v2.Header) error { + if g == nil { + return nil + } + + // All other fields of openapi_v2.Header are handled by + // the embeded fields, commonvalidations, etc. 
+ k.Description = g.Description + return nil +} + +// Parameters + +func (k *Parameter) FromGnostic(g *openapi_v2.Parameter) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + switch p := g.Oneof.(type) { + case *openapi_v2.Parameter_BodyParameter: + if nok, err := k.ParamProps.FromGnostic(p.BodyParameter); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(p.BodyParameter.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.Parameter_NonBodyParameter: + switch nb := g.GetNonBodyParameter().Oneof.(type) { + case *openapi_v2.NonBodyParameter_HeaderParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.HeaderParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_FormDataParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.FormDataParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_QueryParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.QueryParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_PathParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.PathParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + default: + return false, errors.New("unrecognized nonbody type for Parameter") + } + default: + return false, errors.New("unrecognized type for Parameter") + } +} + +type GnosticCommonParamProps interface { + GetName() string + GetRequired() bool + GetIn() string + GetDescription() string +} + +type GnosticCommonParamPropsBodyParameter interface { + GetSchema() *openapi_v2.Schema +} + +type GnosticCommonParamPropsFormData interface { + GetAllowEmptyValue() bool +} + +func (k *ParamProps) FromGnostic(g GnosticCommonParamProps) (ok bool, err error) { + ok = true + k.Description = g.GetDescription() + 
k.In = g.GetIn() + k.Name = g.GetName() + k.Required = g.GetRequired() + + if formDataParameter, success := g.(GnosticCommonParamPropsFormData); success { + k.AllowEmptyValue = formDataParameter.GetAllowEmptyValue() + } + + if bodyParameter, success := g.(GnosticCommonParamPropsBodyParameter); success { + if bodyParameter.GetSchema() != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnostic(bodyParameter.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +// PB types use a different structure than we do for "refable". For PB, there is +// a wrappign oneof type that could be a ref or the type +func (k *Parameter) FromGnosticParametersItem(g *openapi_v2.ParametersItem) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetParameter()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +// Schema + +func (k *Schema) FromGnostic(g *openapi_v2.Schema) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + // SwaggerSchemaProps + k.Discriminator = g.Discriminator + k.ReadOnly = g.ReadOnly + k.Description = g.Description + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Example != nil { + if err := g.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + // SchemaProps + if err := k.Ref.FromGnostic(g.XRef); err != nil { + return false, err + } + k.Type = g.Type.GetValue() + k.Format = g.GetFormat() + k.Title = g.GetTitle() + + // These below fields are not available in gnostic types, so will never + // be populated. This means roundtrips which make use of these + // (non-official, kube-only) fields will lose information. + // + // Schema.ID is not available in official spec + // Schema.$schema + // Schema.Nullable - in openapiv3, not v2 + // Schema.AnyOf - in openapiv3, not v2 + // Schema.OneOf - in openapiv3, not v2 + // Schema.Not - in openapiv3, not v2 + // Schema.PatternProperties - in openapiv3, not v2 + // Schema.Dependencies - in openapiv3, not v2 + // Schema.AdditionalItems + // Schema.Definitions - not part of spec + // Schema.ExtraProps - gnostic parser rejects any keys it does not recognize + + if g.GetDefault() != nil { + if err := g.GetDefault().ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + // These conditionals (!= 0) follow gnostic's logic for ToRawInfo + // The keys in gnostic source are only included if nonzero. 
+ + if g.Maximum != 0.0 { + k.Maximum = &g.Maximum + } + + if g.Minimum != 0.0 { + k.Minimum = &g.Minimum + } + + k.ExclusiveMaximum = g.ExclusiveMaximum + k.ExclusiveMinimum = g.ExclusiveMinimum + + if g.MaxLength != 0 { + k.MaxLength = &g.MaxLength + } + + if g.MinLength != 0 { + k.MinLength = &g.MinLength + } + + k.Pattern = g.GetPattern() + + if g.MaxItems != 0 { + k.MaxItems = &g.MaxItems + } + + if g.MinItems != 0 { + k.MinItems = &g.MinItems + } + k.UniqueItems = g.UniqueItems + + if g.MultipleOf != 0 { + k.MultipleOf = &g.MultipleOf + } + + for _, v := range g.GetEnum() { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return false, err + } + k.Enum = append(k.Enum, convert) + } + + if g.MaxProperties != 0 { + k.MaxProperties = &g.MaxProperties + } + + if g.MinProperties != 0 { + k.MinProperties = &g.MinProperties + } + + k.Required = g.Required + + if g.GetItems() != nil { + k.Items = &SchemaOrArray{} + for _, v := range g.Items.GetSchema() { + if v == nil { + continue + } + + schema := Schema{} + if nok, err := schema.FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Items.Schemas = append(k.Items.Schemas, schema) + } + + if len(k.Items.Schemas) == 1 { + k.Items.Schema = &k.Items.Schemas[0] + k.Items.Schemas = nil + } + } + + for i, v := range g.GetAllOf() { + if v == nil { + continue + } + + k.AllOf = append(k.AllOf, Schema{}) + if nok, err := k.AllOf[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Properties != nil { + k.Properties = make(map[string]Schema) + for _, namedSchema := range g.Properties.AdditionalProperties { + if namedSchema == nil { + continue + } + val := &Schema{} + if nok, err := val.FromGnostic(namedSchema.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Properties[namedSchema.Name] = *val + } + } + + if g.AdditionalProperties != nil { + k.AdditionalProperties = &SchemaOrBool{} + if g.AdditionalProperties.GetSchema() == nil { + k.AdditionalProperties.Allows = g.AdditionalProperties.GetBoolean() + } else { + k.AdditionalProperties.Schema = &Schema{} + k.AdditionalProperties.Allows = true + + if nok, err := k.AdditionalProperties.Schema.FromGnostic(g.AdditionalProperties.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Schema) FromGnosticSchemaItem(g *openapi_v2.SchemaItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + switch p := g.Oneof.(type) { + case *openapi_v2.SchemaItem_FileSchema: + fileSchema := p.FileSchema + + if err := k.VendorExtensible.FromGnostic(fileSchema.VendorExtension); err != nil { + return false, err + } + + k.Format = fileSchema.Format + k.Title = fileSchema.Title + k.Description = fileSchema.Description + k.Required = fileSchema.Required + k.Type = []string{fileSchema.Type} + k.ReadOnly = fileSchema.ReadOnly + + if fileSchema.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(fileSchema.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if fileSchema.Example != nil { + if err := fileSchema.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + if fileSchema.Default != nil { + if err := fileSchema.Default.ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + case 
*openapi_v2.SchemaItem_Schema: + schema := p.Schema + + if nok, err := k.FromGnostic(schema); err != nil { + return false, err + } else if !nok { + ok = false + } + default: + return false, errors.New("unrecognized type for SchemaItem") + } + + return ok, nil +} + +// SecurityDefinitions + +func (k SecurityDefinitions) FromGnostic(g *openapi_v2.SecurityDefinitions) error { + for _, v := range g.GetAdditionalProperties() { + if v == nil { + continue + } + secScheme := &SecurityScheme{} + if err := secScheme.FromGnostic(v.Value); err != nil { + return err + } + k[v.Name] = secScheme + } + + return nil +} + +type GnosticCommonSecurityDefinition interface { + GetType() string + GetDescription() string +} + +func (k *SecuritySchemeProps) FromGnostic(g GnosticCommonSecurityDefinition) error { + k.Type = g.GetType() + k.Description = g.GetDescription() + + if hasName, success := g.(interface{ GetName() string }); success { + k.Name = hasName.GetName() + } + + if hasIn, success := g.(interface{ GetIn() string }); success { + k.In = hasIn.GetIn() + } + + if hasFlow, success := g.(interface{ GetFlow() string }); success { + k.Flow = hasFlow.GetFlow() + } + + if hasAuthURL, success := g.(interface{ GetAuthorizationUrl() string }); success { + k.AuthorizationURL = hasAuthURL.GetAuthorizationUrl() + } + + if hasTokenURL, success := g.(interface{ GetTokenUrl() string }); success { + k.TokenURL = hasTokenURL.GetTokenUrl() + } + + if hasScopes, success := g.(interface { + GetScopes() *openapi_v2.Oauth2Scopes + }); success { + scopes := hasScopes.GetScopes() + if scopes != nil { + k.Scopes = make(map[string]string, len(scopes.AdditionalProperties)) + for _, v := range scopes.AdditionalProperties { + if v == nil { + continue + } + + k.Scopes[v.Name] = v.Value + } + } + } + + return nil +} + +func (k *SecurityScheme) FromGnostic(g *openapi_v2.SecurityDefinitionsItem) error { + if g == nil { + return nil + } + + switch s := g.Oneof.(type) { + case *openapi_v2.SecurityDefinitionsItem_ApiKeySecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.ApiKeySecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.ApiKeySecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_BasicAuthenticationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.BasicAuthenticationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.BasicAuthenticationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2AccessCodeSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2AccessCodeSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ApplicationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ApplicationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ApplicationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ImplicitSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ImplicitSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ImplicitSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2PasswordSecurity: + if err := 
k.SecuritySchemeProps.FromGnostic(s.Oauth2PasswordSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2PasswordSecurity.VendorExtension); err != nil { + return err + } + return nil + default: + return errors.New("unrecognized SecurityDefinitionsItem") + } +} + +// Tag + +func (k *Tag) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if nok, err := k.TagProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *TagProps) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.Name = g.Name + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// ExternalDocumentation + +func (k *ExternalDocumentation) FromGnostic(g *openapi_v2.ExternalDocs) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.URL = g.Url + + // data loss! g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go index 597fc9631..05310c46b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) const ( @@ -41,6 +43,9 @@ type Header struct { // MarshalJSON marshal this to JSON func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.CommonValidations) if err != nil { return nil, err @@ -60,8 +65,26 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4), nil } +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, h) + } + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err } @@ -73,3 +96,23 @@ func (h *Header) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &h.HeaderProps) } + +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + HeaderProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + h.CommonValidations = x.CommonValidations + h.SimpleSchema = x.SimpleSchema + h.Extensions = 
internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 51a2f5781..d667b705b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -19,6 +19,8 @@ import ( "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Extensions vendor specific extensions @@ -87,6 +89,19 @@ func (e Extensions) GetObject(key string, out interface{}) error { return nil } +func (e Extensions) sanitizeWithExtra() (extra map[string]any) { + for k, v := range e { + if !internal.IsExtensionKey(k) { + if extra == nil { + extra = make(map[string]any) + } + extra[k] = v + delete(e, k) + } + } + return extra +} + // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions @@ -154,6 +169,9 @@ type Info struct { // MarshalJSON marshal this to JSON func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.InfoProps) if err != nil { return nil, err @@ -165,10 +183,37 @@ func (i Info) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (i *Info) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err } return json.Unmarshal(data, &i.VendorExtensible) } + +func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + InfoProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + i.Extensions = internal.SanitizeExtensions(x.Extensions) + i.InfoProps = x.InfoProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go index b75aefe16..4132467d2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) const ( @@ -35,6 +37,18 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + // CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` @@ -51,6 +65,23 @@ type CommonValidations struct { Enum []interface{} `json:"enum,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". // @@ -64,6 +95,10 @@ type Items struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (i *Items) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + var validations CommonValidations if err := json.Unmarshal(data, &validations); err != nil { return err @@ -87,8 +122,30 @@ func (i *Items) UnmarshalJSON(data []byte) error { return nil } +func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + + i.CommonValidations = x.CommonValidations + i.SimpleSchema = x.SimpleSchema + i.Extensions = internal.SanitizeExtensions(x.Extensions) + return nil +} + // MarshalJSON converts this items object to JSON func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.CommonValidations) if err != nil { return nil, err @@ -107,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b4, b3, b1, b2), nil } + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go index c7acd8672..63eed3460 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ 
b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // OperationProps describes an operation @@ -40,6 +42,23 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + // MarshalJSON takes care of serializing operation properties to JSON // // We use a custom marhaller here to handle a special cases related to @@ -75,14 +94,35 @@ type Operation struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, o) + } + if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + type OperationPropsNoMethods OperationProps // strip MarshalJSON method + var x struct { + Extensions + OperationPropsNoMethods + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = OperationProps(x.OperationPropsNoMethods) + return nil +} + // MarshalJSON converts this items object to JSON func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -94,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go index 218513974..53d1e0aa9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // ParamProps describes the specific attributes of an operation parameter @@ -34,30 +36,46 @@ type ParamProps struct { AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } +// Marshaling 
structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. // * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. +// +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. // * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. +// +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// // * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. 
Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. // // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -70,6 +88,10 @@ type Parameter struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { return err } @@ -85,8 +107,31 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.ParamProps) } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + ParamProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.CommonValidations = x.CommonValidations + p.SimpleSchema = x.SimpleSchema + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParamProps = x.ParamProps + return nil +} + // MarshalJSON converts this items object to JSON func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.CommonValidations) if err != nil { return nil, err @@ -109,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2, b4, b5), nil } + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go index 04de58f00..1d1588cb9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // PathItemProps the path item specific properties @@ -46,6 +48,10 @@ type PathItem struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *PathItem) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -55,8 +61,29 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.PathItemProps) } +func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + PathItemProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps + + return nil +} + // MarshalJSON converts this items object to JSON func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b3, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -72,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b3, b4, b5) return concated, nil } + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 319aba879..18f6a9f42 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -16,9 +16,12 @@ package spec import ( "encoding/json" + "fmt" "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Paths holds the relative paths to the individual endpoints. 
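The PathItem marshalers above keep "$ref", "x-..." vendor extensions and the HTTP operations as siblings inside one JSON object, which is why the unmarshaler decodes into a combined struct and then pulls "$ref" back out of the extension map. A minimal sketch of that round trip, assuming the vendored package is importable as k8s.io/kube-openapi/pkg/validation/spec (the sample document and field values are illustrative, not part of the patch):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	raw := `{"$ref": "#/definitions/commonItem", "x-owner": "team-a", "get": {"operationId": "listItems"}}`

	var pi spec.PathItem
	if err := json.Unmarshal([]byte(raw), &pi); err != nil {
		panic(err)
	}

	// "$ref" is captured by Refable, "x-owner" by VendorExtensible, "get" by PathItemProps.
	fmt.Println(pi.Ref.String())          // #/definitions/commonItem
	fmt.Println(pi.Extensions["x-owner"]) // team-a
	fmt.Println(pi.Get.ID)                // listItems

	// Re-marshalling folds the three groups back into a single JSON object.
	out, _ := json.Marshal(pi)
	fmt.Println(string(out))
}
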
@@ -34,6 +37,10 @@ type Paths struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -63,8 +70,65 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var pi PathItem + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi = PathItem{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + p.Paths[k] = pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // MarshalJSON converts this items object to JSON func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err @@ -83,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 1405bfd8e..775b3b0c3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -21,6 +21,8 @@ import ( "path/filepath" "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" ) // Refable is a struct for things that accept a $ref property @@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error { } func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil + return internal.JSONRefFromMap(&r.Ref, v) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go index 9fd717ec3..3ff1fe132 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // ResponseProps properties specific to a 
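Paths has no fixed field names: every key is either an endpoint template starting with "/" or a vendor extension starting with "x-", so the Paths marshalers above dispatch on the key prefix instead of struct tags. A small sketch under the same import assumption (keys and values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	raw := `{"x-generated-by": "example", "/widgets/{id}": {"get": {"operationId": "getWidget"}}}`

	var p spec.Paths
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}

	// Keys beginning with "/" land in Paths; "x-" keys land in Extensions.
	fmt.Println(p.Paths["/widgets/{id}"].Get.ID)  // getWidget
	fmt.Println(p.Extensions["x-generated-by"])   // example

	// Marshalling emits both key families into one object again.
	out, _ := json.Marshal(p)
	fmt.Println(string(out))
}
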
response @@ -28,6 +30,15 @@ type ResponseProps struct { Examples map[string]interface{} `json:"examples,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + // Response describes a single response from an API Operation. // // For more information: http://goo.gl/8us55a#responseObject @@ -39,17 +50,47 @@ type Response struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { return err } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - return json.Unmarshal(data, &r.VendorExtensible) + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + ResponseProps + Extensions + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps + + return nil } // MarshalJSON converts this items object to JSON func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponseProps) if err != nil { return nil, err @@ -65,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + // NewResponse creates a new response instance func NewResponse() *Response { return new(Response) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go index b2c3883a9..d9ad760a4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -16,10 +16,13 @@ package spec import ( "encoding/json" + "fmt" "reflect" "strconv" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Responses is a container for the expected responses of an operation. 
@@ -42,6 +45,10 @@ type Responses struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } @@ -56,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error { // MarshalJSON converts this items object to JSON func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -68,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + // ResponsesProps describes all responses for an operation. // It tells what is the default response and maps all responses with a // HTTP status code. @@ -90,21 +119,90 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = &v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = value } } return nil } + +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var resp Response + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + respCopy := resp + r.ResponsesProps.Default = &respCopy + default: + if nk, err := strconv.Atoi(k); err == nil { + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = resp + } + } + } + default: + return 
fmt.Errorf("unknown JSON kind: %v", k) + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go index b0aeeb0d0..dfbb2e05c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -21,6 +21,8 @@ import ( "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // BooleanProperty creates a boolean property @@ -194,6 +196,46 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + // SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` @@ -202,6 +244,15 @@ type SwaggerSchemaProps struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + // Schema the schema object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. 
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) @@ -432,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { // MarshalJSON marshal this to JSON func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SchemaProps) if err != nil { return nil, fmt.Errorf("schema props %v", err) @@ -463,8 +517,37 @@ func (s Schema) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Schema string `json:"$schema,omitempty"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + x.Schema = string(s.Schema) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + props := struct { SchemaProps SwaggerSchemaProps @@ -511,3 +594,38 @@ func (s *Schema) UnmarshalJSON(data []byte) error { return nil } + +func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SchemaProps + SwaggerSchemaProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := x.Ref.fromMap(x.Extensions); err != nil { + return err + } + + if err := x.Schema.fromMap(x.Extensions); err != nil { + return err + } + + delete(x.Extensions, "$ref") + delete(x.Extensions, "$schema") + + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(x.Extensions, pn) + } + if len(x.Extensions) == 0 { + x.Extensions = nil + } + + s.ExtraProps = x.Extensions.sanitizeWithExtra() + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SchemaProps = x.SchemaProps + s.SwaggerSchemaProps = x.SwaggerSchemaProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go index 563b9b95e..e2b7da14c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section @@ -44,6 +46,9 @@ type SecurityScheme struct { // MarshalJSON marshal this to JSON func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -55,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return 
swag.ConcatJSON(b1, b2), nil } +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { @@ -62,3 +77,16 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &s.VendorExtensible) } + +func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SecuritySchemeProps = x.SecuritySchemeProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go index be66d1ddd..c8f3beaa3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -19,6 +19,8 @@ import ( "fmt" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Swagger this is the root document object for the API specification. @@ -33,6 +35,9 @@ type Swagger struct { // MarshalJSON marshals this swagger structure to json func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SwaggerProps) if err != nil { return nil, err @@ -44,8 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals a swagger spec from json func (s *Swagger) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { return err @@ -57,6 +76,23 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + // Note: If you're willing to make breaking changes, it is possible to + // optimize this and other usages of this pattern: + // https://github.com/kubernetes/kube-openapi/pull/319#discussion_r983165948 + var x struct { + Extensions + SwaggerProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SwaggerProps = x.SwaggerProps + return nil +} + // SwaggerProps captures the top-level properties of an Api specification // // NOTE: validation rules @@ -96,6 +132,9 @@ var jsFalse = []byte("false") // MarshalJSON convert this object to JSON func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if s.Schema != nil { return 
json.Marshal(s.Schema) } @@ -106,23 +145,59 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch + if len(data) > 0 && data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Schema = &sch + nw.Allows = true + } else { + json.Unmarshal(data, &nw.Allows) } *s = nw return nil } +func (s *SchemaOrBool) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '{': + err := opts.UnmarshalNext(dec, &s.Schema) + if err != nil { + return err + } + s.Allows = true + return nil + case 't', 'f': + err := opts.UnmarshalNext(dec, &s.Allows) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("expected object or bool, not '%v'", k.String()) + } +} + // SchemaOrStringArray represents a schema or a string array type SchemaOrStringArray struct { Schema *Schema @@ -131,6 +206,9 @@ type SchemaOrStringArray struct { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Property) > 0 { return json.Marshal(s.Property) } @@ -140,8 +218,23 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var first byte if len(data) > 1 { first = data[0] @@ -163,6 +256,18 @@ func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s *SchemaOrStringArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Property) + default: + _, err := dec.ReadValue() + return err + } +} + // Definitions contains the models explicitly defined in this spec // An object to hold data types that can be consumed and produced by operations. // These data types can be primitives, arrays or models. 
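(Editor's note) SchemaOrBool and SchemaOrStringArray above, like StringOrArray and SchemaOrArray just below, are "one of two JSON shapes" unions: the encoding/json path sniffs the first byte of the raw message, while the jsonv2 path peeks at the next token kind. Here is a small standalone sketch of the byte-sniffing variant, assuming no leading whitespace (the vendored code makes the same assumption); boolOrObject is an illustrative name, not the library's type.

package main

import (
	"encoding/json"
	"fmt"
)

// boolOrObject accepts either a JSON boolean or a JSON object,
// loosely modelled on the SchemaOrBool idea above.
type boolOrObject struct {
	Allows bool
	Object map[string]interface{}
}

func (b *boolOrObject) UnmarshalJSON(data []byte) error {
	if len(data) == 0 {
		return fmt.Errorf("empty input")
	}
	// Sniff the first byte to pick a decoding path.
	switch data[0] {
	case '{': // object form
		b.Allows = true
		return json.Unmarshal(data, &b.Object)
	case 't', 'f': // boolean form
		return json.Unmarshal(data, &b.Allows)
	default:
		return fmt.Errorf("expected object or bool, got %q", data[0])
	}
}

func main() {
	var a, b boolOrObject
	if err := json.Unmarshal([]byte(`{"type":"string"}`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`false`), &b); err != nil {
		panic(err)
	}
	fmt.Println(a.Allows, a.Object["type"], b.Allows) // true string false
}

With jsonv2 the same decision is made through dec.PeekKind(), which is why the new UnmarshalNextJSON methods in this patch switch on token kinds such as '{', 't'/'f', and '[' instead of raw bytes.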
@@ -193,6 +298,10 @@ func (s StringOrArray) Contains(value string) bool { // UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string func (s *StringOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var first byte if len(data) > 1 { first = data[0] @@ -223,6 +332,23 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { } } +func (s *StringOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '[': + *s = StringOrArray{} + return opts.UnmarshalNext(dec, (*[]string)(s)) + case '"': + *s = StringOrArray{""} + return opts.UnmarshalNext(dec, &(*s)[0]) + case 'n': + // Throw out null token + _, _ = dec.ReadToken() + return nil + default: + return fmt.Errorf("expected string or array, not '%v'", k.String()) + } +} + // MarshalJSON converts this string or array to a JSON array or JSON string func (s StringOrArray) MarshalJSON() ([]byte, error) { if len(s) == 1 { @@ -256,14 +382,29 @@ func (s *SchemaOrArray) ContainsType(name string) bool { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schemas != nil { return json.Marshal(s.Schemas) } return json.Marshal(s.Schema) } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schemas != nil { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var nw SchemaOrArray var first byte if len(data) > 1 { @@ -284,3 +425,15 @@ func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { *s = nw return nil } + +func (s *SchemaOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Schemas) + default: + _, err := dec.ReadValue() + return err + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go index ddd1eac7e..d105d52ca 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // TagProps describe a tag entry in the top level tags section of a swagger spec @@ -39,6 +41,9 @@ type Tag struct { // MarshalJSON marshal this to JSON func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } b1, err := json.Marshal(t.TagProps) if err != nil { return nil, err @@ -50,10 +55,37 @@ func (t Tag) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + 
x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (t *Tag) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, t) + } + if err := json.Unmarshal(data, &t.TagProps); err != nil { return err } return json.Unmarshal(data, &t.VendorExtensible) } + +func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + TagProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + t.Extensions = internal.SanitizeExtensions(x.Extensions) + t.TagProps = x.TagProps + return nil +} diff --git a/vendor/k8s.io/utils/net/ipfamily.go b/vendor/k8s.io/utils/net/ipfamily.go new file mode 100644 index 000000000..1a51fa391 --- /dev/null +++ b/vendor/k8s.io/utils/net/ipfamily.go @@ -0,0 +1,181 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" +) + +// IPFamily refers to a specific family if not empty, i.e. "4" or "6". +type IPFamily string + +// Constants for valid IPFamilys: +const ( + IPFamilyUnknown IPFamily = "" + + IPv4 IPFamily = "4" + IPv6 IPFamily = "6" +) + +// IsDualStackIPs returns true if: +// - all elements of ips are valid +// - at least one IP from each family (v4 and v6) is present +func IsDualStackIPs(ips []net.IP) (bool, error) { + v4Found := false + v6Found := false + for i, ip := range ips { + switch IPFamilyOf(ip) { + case IPv4: + v4Found = true + case IPv6: + v6Found = true + default: + return false, fmt.Errorf("invalid IP[%d]: %v", i, ip) + } + } + + return (v4Found && v6Found), nil +} + +// IsDualStackIPStrings returns true if: +// - all elements of ips can be parsed as IPs +// - at least one IP from each family (v4 and v6) is present +func IsDualStackIPStrings(ips []string) (bool, error) { + parsedIPs := make([]net.IP, 0, len(ips)) + for i, ip := range ips { + parsedIP := ParseIPSloppy(ip) + if parsedIP == nil { + return false, fmt.Errorf("invalid IP[%d]: %v", i, ip) + } + parsedIPs = append(parsedIPs, parsedIP) + } + return IsDualStackIPs(parsedIPs) +} + +// IsDualStackCIDRs returns true if: +// - all elements of cidrs are non-nil +// - at least one CIDR from each family (v4 and v6) is present +func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for i, cidr := range cidrs { + switch IPFamilyOfCIDR(cidr) { + case IPv4: + v4Found = true + case IPv6: + v6Found = true + default: + return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr) + } + } + + return (v4Found && v6Found), nil +} + +// IsDualStackCIDRStrings returns if +// - all elements of cidrs can be parsed as CIDRs +// - at least one CIDR from each family (v4 and v6) is present +func IsDualStackCIDRStrings(cidrs []string) (bool, error) { + parsedCIDRs, err := ParseCIDRs(cidrs) + if err != nil { + return false, err + } + return IsDualStackCIDRs(parsedCIDRs) +} + +// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if 
it is invalid. +func IPFamilyOf(ip net.IP) IPFamily { + switch { + case ip.To4() != nil: + return IPv4 + case ip.To16() != nil: + return IPv6 + default: + return IPFamilyUnknown + } +} + +// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot +// be parsed as an IP. +func IPFamilyOfString(ip string) IPFamily { + return IPFamilyOf(ParseIPSloppy(ip)) +} + +// IPFamilyOfCIDR returns the IP family of cidr. +func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily { + if cidr == nil { + return IPFamilyUnknown + } + return IPFamilyOf(cidr.IP) +} + +// IPFamilyOfCIDRString returns the IP family of cidr. +func IPFamilyOfCIDRString(cidr string) IPFamily { + ip, _, _ := ParseCIDRSloppy(cidr) + return IPFamilyOf(ip) +} + +// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid). +func IsIPv6(netIP net.IP) bool { + return IPFamilyOf(netIP) == IPv6 +} + +// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It +// returns false if ip is an empty string, an IPv4 address, or anything else that is not a +// single IPv6 address. +func IsIPv6String(ip string) bool { + return IPFamilyOfString(ip) == IPv6 +} + +// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is +// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid. +func IsIPv6CIDR(cidr *net.IPNet) bool { + return IPFamilyOfCIDR(cidr) == IPv6 +} + +// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. It +// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a +// single valid IPv6 CIDR. +func IsIPv6CIDRString(cidr string) bool { + return IPFamilyOfCIDRString(cidr) == IPv6 +} + +// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid). +func IsIPv4(netIP net.IP) bool { + return IPFamilyOf(netIP) == IPv4 +} + +// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It +// returns false if ip is an empty string, an IPv6 address, or anything else that is not a +// single IPv4 address. +func IsIPv4String(ip string) bool { + return IPFamilyOfString(ip) == IPv4 +} + +// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil +// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid. +func IsIPv4CIDR(cidr *net.IPNet) bool { + return IPFamilyOfCIDR(cidr) == IPv4 +} + +// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It +// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a +// single valid IPv4 CIDR. 
+func IsIPv4CIDRString(cidr string) bool { + return IPFamilyOfCIDRString(cidr) == IPv4 +} diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go index b7c08e2e0..704c1f232 100644 --- a/vendor/k8s.io/utils/net/net.go +++ b/vendor/k8s.io/utils/net/net.go @@ -29,138 +29,16 @@ import ( // order is maintained func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { cidrs := make([]*net.IPNet, 0, len(cidrsString)) - for _, cidrString := range cidrsString { + for i, cidrString := range cidrsString { _, cidr, err := ParseCIDRSloppy(cidrString) if err != nil { - return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) + return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidr, err) } cidrs = append(cidrs, cidr) } return cidrs, nil } -// IsDualStackIPs returns if a slice of ips is: -// - all are valid ips -// - at least one ip from each family (v4 or v6) -func IsDualStackIPs(ips []net.IP) (bool, error) { - v4Found := false - v6Found := false - for _, ip := range ips { - if ip == nil { - return false, fmt.Errorf("ip %v is invalid", ip) - } - - if v4Found && v6Found { - continue - } - - if IsIPv6(ip) { - v6Found = true - continue - } - - v4Found = true - } - - return (v4Found && v6Found), nil -} - -// IsDualStackIPStrings returns if -// - all are valid ips -// - at least one ip from each family (v4 or v6) -func IsDualStackIPStrings(ips []string) (bool, error) { - parsedIPs := make([]net.IP, 0, len(ips)) - for _, ip := range ips { - parsedIP := ParseIPSloppy(ip) - parsedIPs = append(parsedIPs, parsedIP) - } - return IsDualStackIPs(parsedIPs) -} - -// IsDualStackCIDRs returns if -// - all are valid cidrs -// - at least one cidr from each family (v4 or v6) -func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { - v4Found := false - v6Found := false - for _, cidr := range cidrs { - if cidr == nil { - return false, fmt.Errorf("cidr %v is invalid", cidr) - } - - if v4Found && v6Found { - continue - } - - if IsIPv6(cidr.IP) { - v6Found = true - continue - } - v4Found = true - } - - return v4Found && v6Found, nil -} - -// IsDualStackCIDRStrings returns if -// - all are valid cidrs -// - at least one cidr from each family (v4 or v6) -func IsDualStackCIDRStrings(cidrs []string) (bool, error) { - parsedCIDRs, err := ParseCIDRs(cidrs) - if err != nil { - return false, err - } - return IsDualStackCIDRs(parsedCIDRs) -} - -// IsIPv6 returns if netIP is IPv6. -func IsIPv6(netIP net.IP) bool { - return netIP != nil && netIP.To4() == nil -} - -// IsIPv6String returns if ip is IPv6. -func IsIPv6String(ip string) bool { - netIP := ParseIPSloppy(ip) - return IsIPv6(netIP) -} - -// IsIPv6CIDRString returns if cidr is IPv6. -// This assumes cidr is a valid CIDR. -func IsIPv6CIDRString(cidr string) bool { - ip, _, _ := ParseCIDRSloppy(cidr) - return IsIPv6(ip) -} - -// IsIPv6CIDR returns if a cidr is ipv6 -func IsIPv6CIDR(cidr *net.IPNet) bool { - ip := cidr.IP - return IsIPv6(ip) -} - -// IsIPv4 returns if netIP is IPv4. -func IsIPv4(netIP net.IP) bool { - return netIP != nil && netIP.To4() != nil -} - -// IsIPv4String returns if ip is IPv4. -func IsIPv4String(ip string) bool { - netIP := ParseIPSloppy(ip) - return IsIPv4(netIP) -} - -// IsIPv4CIDR returns if a cidr is ipv4 -func IsIPv4CIDR(cidr *net.IPNet) bool { - ip := cidr.IP - return IsIPv4(ip) -} - -// IsIPv4CIDRString returns if cidr is IPv4. -// This assumes cidr is a valid CIDR. 
-func IsIPv4CIDRString(cidr string) bool { - ip, _, _ := ParseCIDRSloppy(cidr) - return IsIPv4(ip) -} - // ParsePort parses a string representing an IP port. If the string is not a // valid port number, this returns an error. func ParsePort(port string, allowZero bool) (int, error) { diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go index 7ac04f0dc..c6a53fa02 100644 --- a/vendor/k8s.io/utils/net/port.go +++ b/vendor/k8s.io/utils/net/port.go @@ -23,15 +23,6 @@ import ( "strings" ) -// IPFamily refers to a specific family if not empty, i.e. "4" or "6". -type IPFamily string - -// Constants for valid IPFamilys: -const ( - IPv4 IPFamily = "4" - IPv6 = "6" -) - // Protocol is a network protocol support by LocalPort. type Protocol string @@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco if protocol != TCP && protocol != UDP { return nil, fmt.Errorf("Unsupported protocol %s", protocol) } - if ipFamily != "" && ipFamily != "4" && ipFamily != "6" { + if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 { return nil, fmt.Errorf("Invalid IP family %s", ipFamily) } if ip != "" { @@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco if parsedIP == nil { return nil, fmt.Errorf("invalid ip address %s", ip) } - asIPv4 := parsedIP.To4() - if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 { - return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + if ipFamily != IPFamilyUnknown { + if IPFamily(parsedIP) != ipFamily { + return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + } } } return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/README.md b/vendor/k8s.io/utils/pointer/README.md new file mode 100644 index 000000000..2ca8073dc --- /dev/null +++ b/vendor/k8s.io/utils/pointer/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 000000000..b8103223a --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,410 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pointer + +import ( + "fmt" + "reflect" + "time" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". 
+// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// Int returns a pointer to an int +func Int(i int) *int { + return &i +} + +// IntPtr is a function variable referring to Int. +// +// Deprecated: Use Int instead. +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +func IntDeref(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +// IntPtrDerefOr is a function variable referring to IntDeref. +// +// Deprecated: Use IntDeref instead. +var IntPtrDerefOr = IntDeref // for back-compat + +// Int32 returns a pointer to an int32. +func Int32(i int32) *int32 { + return &i +} + +// Int32Ptr is a function variable referring to Int32. +// +// Deprecated: Use Int32 instead. +var Int32Ptr = Int32 // for back-compat + +// Int32Deref dereferences the int32 ptr and returns it if not nil, or else +// returns def. +func Int32Deref(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + +// Int32PtrDerefOr is a function variable referring to Int32Deref. +// +// Deprecated: Use Int32Deref instead. +var Int32PtrDerefOr = Int32Deref // for back-compat + +// Int32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Int32Equal(a, b *int32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Uint returns a pointer to an uint +func Uint(i uint) *uint { + return &i +} + +// UintPtr is a function variable referring to Uint. +// +// Deprecated: Use Uint instead. +var UintPtr = Uint // for back-compat + +// UintDeref dereferences the uint ptr and returns it if not nil, or else +// returns def. +func UintDeref(ptr *uint, def uint) uint { + if ptr != nil { + return *ptr + } + return def +} + +// UintPtrDerefOr is a function variable referring to UintDeref. +// +// Deprecated: Use UintDeref instead. +var UintPtrDerefOr = UintDeref // for back-compat + +// Uint32 returns a pointer to an uint32. +func Uint32(i uint32) *uint32 { + return &i +} + +// Uint32Ptr is a function variable referring to Uint32. +// +// Deprecated: Use Uint32 instead. +var Uint32Ptr = Uint32 // for back-compat + +// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else +// returns def. +func Uint32Deref(ptr *uint32, def uint32) uint32 { + if ptr != nil { + return *ptr + } + return def +} + +// Uint32PtrDerefOr is a function variable referring to Uint32Deref. +// +// Deprecated: Use Uint32Deref instead. +var Uint32PtrDerefOr = Uint32Deref // for back-compat + +// Uint32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Uint32Equal(a, b *uint32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Int64 returns a pointer to an int64. +func Int64(i int64) *int64 { + return &i +} + +// Int64Ptr is a function variable referring to Int64. 
+// +// Deprecated: Use Int64 instead. +var Int64Ptr = Int64 // for back-compat + +// Int64Deref dereferences the int64 ptr and returns it if not nil, or else +// returns def. +func Int64Deref(ptr *int64, def int64) int64 { + if ptr != nil { + return *ptr + } + return def +} + +// Int64PtrDerefOr is a function variable referring to Int64Deref. +// +// Deprecated: Use Int64Deref instead. +var Int64PtrDerefOr = Int64Deref // for back-compat + +// Int64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Int64Equal(a, b *int64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Uint64 returns a pointer to an uint64. +func Uint64(i uint64) *uint64 { + return &i +} + +// Uint64Ptr is a function variable referring to Uint64. +// +// Deprecated: Use Uint64 instead. +var Uint64Ptr = Uint64 // for back-compat + +// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else +// returns def. +func Uint64Deref(ptr *uint64, def uint64) uint64 { + if ptr != nil { + return *ptr + } + return def +} + +// Uint64PtrDerefOr is a function variable referring to Uint64Deref. +// +// Deprecated: Use Uint64Deref instead. +var Uint64PtrDerefOr = Uint64Deref // for back-compat + +// Uint64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Uint64Equal(a, b *uint64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Bool returns a pointer to a bool. +func Bool(b bool) *bool { + return &b +} + +// BoolPtr is a function variable referring to Bool. +// +// Deprecated: Use Bool instead. +var BoolPtr = Bool // for back-compat + +// BoolDeref dereferences the bool ptr and returns it if not nil, or else +// returns def. +func BoolDeref(ptr *bool, def bool) bool { + if ptr != nil { + return *ptr + } + return def +} + +// BoolPtrDerefOr is a function variable referring to BoolDeref. +// +// Deprecated: Use BoolDeref instead. +var BoolPtrDerefOr = BoolDeref // for back-compat + +// BoolEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func BoolEqual(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// String returns a pointer to a string. +func String(s string) *string { + return &s +} + +// StringPtr is a function variable referring to String. +// +// Deprecated: Use String instead. +var StringPtr = String // for back-compat + +// StringDeref dereferences the string ptr and returns it if not nil, or else +// returns def. +func StringDeref(ptr *string, def string) string { + if ptr != nil { + return *ptr + } + return def +} + +// StringPtrDerefOr is a function variable referring to StringDeref. +// +// Deprecated: Use StringDeref instead. +var StringPtrDerefOr = StringDeref // for back-compat + +// StringEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func StringEqual(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float32 returns a pointer to a float32. +func Float32(i float32) *float32 { + return &i +} + +// Float32Ptr is a function variable referring to Float32. +// +// Deprecated: Use Float32 instead. 
+var Float32Ptr = Float32 + +// Float32Deref dereferences the float32 ptr and returns it if not nil, or else +// returns def. +func Float32Deref(ptr *float32, def float32) float32 { + if ptr != nil { + return *ptr + } + return def +} + +// Float32PtrDerefOr is a function variable referring to Float32Deref. +// +// Deprecated: Use Float32Deref instead. +var Float32PtrDerefOr = Float32Deref // for back-compat + +// Float32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Float32Equal(a, b *float32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float64 returns a pointer to a float64. +func Float64(i float64) *float64 { + return &i +} + +// Float64Ptr is a function variable referring to Float64. +// +// Deprecated: Use Float64 instead. +var Float64Ptr = Float64 + +// Float64Deref dereferences the float64 ptr and returns it if not nil, or else +// returns def. +func Float64Deref(ptr *float64, def float64) float64 { + if ptr != nil { + return *ptr + } + return def +} + +// Float64PtrDerefOr is a function variable referring to Float64Deref. +// +// Deprecated: Use Float64Deref instead. +var Float64PtrDerefOr = Float64Deref // for back-compat + +// Float64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Float64Equal(a, b *float64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Duration returns a pointer to a time.Duration. +func Duration(d time.Duration) *time.Duration { + return &d +} + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. +func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { + if ptr != nil { + return *ptr + } + return def +} + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func DurationEqual(a, b *time.Duration) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 3023d1066..187eb5d8c 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "math/rand" + "sync" "time" "k8s.io/klog/v2" @@ -64,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 { } type traceItem interface { + // rLock must be called before invoking time or writeItem. + rLock() + // rUnlock must be called after processing the item is complete. + rUnlock() + // time returns when the trace was recorded as completed. time() time.Time // writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the @@ -78,6 +84,10 @@ type traceStep struct { fields []Field } +// rLock doesn't need to do anything because traceStep instances are immutable. 
+func (s traceStep) rLock() {} +func (s traceStep) rUnlock() {} + func (s traceStep) time() time.Time { return s.stepTime } @@ -93,13 +103,24 @@ func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.T // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { + // constant fields name string fields []Field - threshold *time.Duration startTime time.Time - endTime *time.Time - traceItems []traceItem parentTrace *Trace + // fields guarded by a lock + lock sync.RWMutex + threshold *time.Duration + endTime *time.Time + traceItems []traceItem +} + +func (t *Trace) rLock() { + t.lock.RLock() +} + +func (t *Trace) rUnlock() { + t.lock.RUnlock() } func (t *Trace) time() time.Time { @@ -138,6 +159,8 @@ func New(name string, fields ...Field) *Trace { // how long it took. The Fields add key value pairs to provide additional details about the trace // step. func (t *Trace) Step(msg string, fields ...Field) { + t.lock.Lock() + defer t.lock.Unlock() if t.traceItems == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.traceItems = make([]traceItem, 0, 6) @@ -153,7 +176,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { newTrace := New(msg, fields...) if t != nil { newTrace.parentTrace = t + t.lock.Lock() t.traceItems = append(t.traceItems, newTrace) + t.lock.Unlock() } return newTrace } @@ -163,7 +188,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { // is logged. func (t *Trace) Log() { endTime := time.Now() + t.lock.Lock() t.endTime = &endTime + t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() @@ -178,13 +205,17 @@ func (t *Trace) Log() { // If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it // is nested within is logged. func (t *Trace) LogIfLong(threshold time.Duration) { + t.lock.Lock() t.threshold = &threshold + t.lock.Unlock() t.Log() } // logTopLevelTraces finds all traces in a hierarchy of nested traces that should be logged but do not have any // parents that will be logged, due to threshold limits, and logs them as top level traces. 
func (t *Trace) logTrace() { + t.lock.RLock() + defer t.lock.RUnlock() if t.durationIsWithinThreshold() { var buffer bytes.Buffer traceNum := rand.Int31() @@ -217,8 +248,10 @@ func (t *Trace) logTrace() { func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) { lastStepTime := t.startTime for _, stepOrTrace := range t.traceItems { + stepOrTrace.rLock() stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold) lastStepTime = stepOrTrace.time() + stepOrTrace.rUnlock() } } @@ -244,9 +277,13 @@ func (t *Trace) calculateStepThreshold() *time.Duration { traceThreshold := *t.threshold for _, s := range t.traceItems { nestedTrace, ok := s.(*Trace) - if ok && nestedTrace.threshold != nil { - traceThreshold = traceThreshold - *nestedTrace.threshold - lenTrace-- + if ok { + nestedTrace.lock.RLock() + if nestedTrace.threshold != nil { + traceThreshold = traceThreshold - *nestedTrace.threshold + lenTrace-- + } + nestedTrace.lock.RUnlock() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 4e362e0cf..135831a0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,12 +1,6 @@ # github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible ## explicit github.com/Knetic/govaluate -# github.com/PuerkitoBio/purell v1.1.1 -## explicit -github.com/PuerkitoBio/purell -# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 -## explicit -github.com/PuerkitoBio/urlesc # github.com/benbjohnson/clock v1.3.0 ## explicit; go 1.15 github.com/benbjohnson/clock @@ -22,39 +16,40 @@ github.com/davecgh/go-spew/spew # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize -# github.com/emicklei/go-restful v2.16.0+incompatible -## explicit -github.com/emicklei/go-restful -github.com/emicklei/go-restful/log -# github.com/evanphx/json-patch v4.12.0+incompatible -## explicit -github.com/evanphx/json-patch -# github.com/fsnotify/fsnotify v1.5.1 +# github.com/emicklei/go-restful/v3 v3.10.2 ## explicit; go 1.13 +github.com/emicklei/go-restful/v3 +github.com/emicklei/go-restful/v3/log +# github.com/evanphx/json-patch/v5 v5.6.0 +## explicit; go 1.12 +github.com/evanphx/json-patch/v5 +# github.com/fsnotify/fsnotify v1.6.0 +## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/go-kit/kit v0.12.0 ## explicit; go 1.17 github.com/go-kit/kit/log github.com/go-kit/kit/log/level github.com/go-kit/kit/log/logrus -# github.com/go-kit/log v0.2.0 +# github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log github.com/go-kit/log/level # github.com/go-logfmt/logfmt v0.5.1 ## explicit; go 1.17 github.com/go-logfmt/logfmt -# github.com/go-logr/logr v1.2.3 +# github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr -# github.com/go-openapi/jsonpointer v0.19.5 +# github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.5 +# github.com/go-openapi/jsonreference v0.20.2 ## explicit; go 1.13 github.com/go-openapi/jsonreference -# github.com/go-openapi/swag v0.19.14 -## explicit; go 1.11 +github.com/go-openapi/jsonreference/internal +# github.com/go-openapi/swag v0.22.3 +## explicit; go 1.18 github.com/go-openapi/swag # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 @@ -74,13 +69,13 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/gnostic v0.5.7-v3refs -## explicit; go 1.12 -github.com/google/gnostic/compiler 
-github.com/google/gnostic/extensions -github.com/google/gnostic/jsonschema -github.com/google/gnostic/openapiv2 -github.com/google/gnostic/openapiv3 +# github.com/google/gnostic-models v0.6.8 +## explicit; go 1.18 +github.com/google/gnostic-models/compiler +github.com/google/gnostic-models/extensions +github.com/google/gnostic-models/jsonschema +github.com/google/gnostic-models/openapiv2 +github.com/google/gnostic-models/openapiv3 # github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -96,9 +91,10 @@ github.com/google/go-jsonnet/astgen github.com/google/go-jsonnet/internal/errors github.com/google/go-jsonnet/internal/parser github.com/google/go-jsonnet/internal/program -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz +github.com/google/gofuzz/bytesource # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid @@ -117,7 +113,7 @@ github.com/hashicorp/hcl/json/token # github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb ## explicit github.com/heptiolabs/healthcheck -# github.com/imdario/mergo v0.3.12 +# github.com/imdario/mergo v0.3.15 ## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 @@ -151,15 +147,13 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/cpuid/v2 v2.2.5 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 -# github.com/kr/pretty v0.3.0 -## explicit; go 1.12 # github.com/libp2p/go-reuseport v0.1.0 ## explicit; go 1.16 github.com/libp2p/go-reuseport # github.com/magiconair/properties v1.8.5 ## explicit; go 1.13 github.com/magiconair/properties -# github.com/mailru/easyjson v0.7.6 +# github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer @@ -167,7 +161,7 @@ github.com/mailru/easyjson/jwriter # github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f ## explicit; go 1.17 github.com/mariomac/guara/pkg/test -# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 +# github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 @@ -195,6 +189,10 @@ github.com/minio/sha256-simd # github.com/mitchellh/mapstructure v1.4.3 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/moby/spdystream v0.2.0 +## explicit; go 1.13 +github.com/moby/spdystream +github.com/moby/spdystream/spdy # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -288,23 +286,23 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.12.1 -## explicit; go 1.13 +# github.com/prometheus/client_golang v1.15.1 +## explicit; go 1.17 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.2.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.4.0 +## explicit; go 1.18 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.32.1 => github.com/netobserv/prometheus-common v0.31.2-0.20220720134304-43e74fd22881 -## explicit; go 1.13 +# github.com/prometheus/common v0.44.0 => github.com/netobserv/prometheus-common v0.44.0-netobserv +## explicit; go 1.18 
github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model github.com/prometheus/common/version -# github.com/prometheus/procfs v0.7.3 -## explicit; go 1.13 +# github.com/prometheus/procfs v0.9.0 +## explicit; go 1.18 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -397,7 +395,7 @@ github.com/spf13/viper/internal/encoding/yaml # github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.8.1 +# github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock @@ -405,11 +403,12 @@ github.com/stretchr/testify/require # github.com/subosito/gotenv v1.2.0 ## explicit github.com/subosito/gotenv -# github.com/vladimirvivien/gexe v0.1.1 +# github.com/vladimirvivien/gexe v0.2.0 ## explicit; go 1.16 github.com/vladimirvivien/gexe github.com/vladimirvivien/gexe/exec github.com/vladimirvivien/gexe/fs +github.com/vladimirvivien/gexe/http github.com/vladimirvivien/gexe/prog github.com/vladimirvivien/gexe/vars # github.com/vmware/go-ipfix v0.5.13 @@ -441,10 +440,13 @@ golang.org/x/crypto/pbkdf2 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries +golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace # golang.org/x/oauth2 v0.10.0 @@ -468,8 +470,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -golang.org/x/text/width -# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate # google.golang.org/appengine v1.6.7 @@ -574,8 +575,6 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 ## explicit -# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c -## explicit; go 1.11 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 @@ -588,15 +587,18 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.24.0 -## explicit; go 1.16 +# k8s.io/api v0.28.2 +## explicit; go 1.20 k8s.io/api/admissionregistration/v1 +k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apidiscovery/v2beta1 k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/authentication/v1 +k8s.io/api/authentication/v1alpha1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 @@ -607,6 +609,7 @@ k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/certificates/v1 +k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 @@ -619,7 +622,9 @@ k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 +k8s.io/api/flowcontrol/v1beta3 k8s.io/api/networking/v1 +k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 @@ -629,21 +634,25 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 
+k8s.io/api/resource/v1alpha2 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.24.0 -## explicit; go 1.16 +# k8s.io/apimachinery v0.28.2 +## explicit; go 1.20 +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams @@ -661,14 +670,19 @@ k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/dump k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/httpstream +k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/remotecommand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch @@ -679,10 +693,12 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.24.0 -## explicit; go 1.16 +# k8s.io/client-go v0.28.2 +## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 +k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1 k8s.io/client-go/applyconfigurations/apps/v1 @@ -695,6 +711,7 @@ k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 k8s.io/client-go/applyconfigurations/batch/v1beta1 k8s.io/client-go/applyconfigurations/certificates/v1 +k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 @@ -707,9 +724,11 @@ k8s.io/client-go/applyconfigurations/extensions/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 +k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 +k8s.io/client-go/applyconfigurations/networking/v1alpha1 k8s.io/client-go/applyconfigurations/networking/v1beta1 k8s.io/client-go/applyconfigurations/node/v1 k8s.io/client-go/applyconfigurations/node/v1alpha1 @@ -719,6 +738,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 
k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 +k8s.io/client-go/applyconfigurations/resource/v1alpha2 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -730,6 +750,7 @@ k8s.io/client-go/dynamic k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 +k8s.io/client-go/informers/admissionregistration/v1alpha1 k8s.io/client-go/informers/admissionregistration/v1beta1 k8s.io/client-go/informers/apiserverinternal k8s.io/client-go/informers/apiserverinternal/v1alpha1 @@ -747,6 +768,7 @@ k8s.io/client-go/informers/batch/v1 k8s.io/client-go/informers/batch/v1beta1 k8s.io/client-go/informers/certificates k8s.io/client-go/informers/certificates/v1 +k8s.io/client-go/informers/certificates/v1alpha1 k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination k8s.io/client-go/informers/coordination/v1 @@ -765,9 +787,11 @@ k8s.io/client-go/informers/flowcontrol k8s.io/client-go/informers/flowcontrol/v1alpha1 k8s.io/client-go/informers/flowcontrol/v1beta1 k8s.io/client-go/informers/flowcontrol/v1beta2 +k8s.io/client-go/informers/flowcontrol/v1beta3 k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1 +k8s.io/client-go/informers/networking/v1alpha1 k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/informers/node k8s.io/client-go/informers/node/v1 @@ -780,6 +804,8 @@ k8s.io/client-go/informers/rbac k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 +k8s.io/client-go/informers/resource +k8s.io/client-go/informers/resource/v1alpha2 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 @@ -791,12 +817,14 @@ k8s.io/client-go/informers/storage/v1beta1 k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 +k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1beta2 k8s.io/client-go/kubernetes/typed/authentication/v1 +k8s.io/client-go/kubernetes/typed/authentication/v1alpha1 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1 k8s.io/client-go/kubernetes/typed/authorization/v1beta1 @@ -807,6 +835,7 @@ k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1 +k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 @@ -819,7 +848,9 @@ k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 k8s.io/client-go/kubernetes/typed/networking/v1 
+k8s.io/client-go/kubernetes/typed/networking/v1alpha1 k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/node/v1 k8s.io/client-go/kubernetes/typed/node/v1alpha1 @@ -829,6 +860,7 @@ k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/resource/v1alpha2 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -836,6 +868,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/listers/admissionregistration/v1 +k8s.io/client-go/listers/admissionregistration/v1alpha1 k8s.io/client-go/listers/admissionregistration/v1beta1 k8s.io/client-go/listers/apiserverinternal/v1alpha1 k8s.io/client-go/listers/apps/v1 @@ -848,6 +881,7 @@ k8s.io/client-go/listers/autoscaling/v2beta2 k8s.io/client-go/listers/batch/v1 k8s.io/client-go/listers/batch/v1beta1 k8s.io/client-go/listers/certificates/v1 +k8s.io/client-go/listers/certificates/v1alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 k8s.io/client-go/listers/coordination/v1beta1 @@ -860,7 +894,9 @@ k8s.io/client-go/listers/extensions/v1beta1 k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/flowcontrol/v1beta2 +k8s.io/client-go/listers/flowcontrol/v1beta3 k8s.io/client-go/listers/networking/v1 +k8s.io/client-go/listers/networking/v1alpha1 k8s.io/client-go/listers/networking/v1beta1 k8s.io/client-go/listers/node/v1 k8s.io/client-go/listers/node/v1alpha1 @@ -870,6 +906,7 @@ k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 +k8s.io/client-go/listers/resource/v1alpha2 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 @@ -889,6 +926,7 @@ k8s.io/client-go/rest/watch k8s.io/client-go/restmapper k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -896,54 +934,63 @@ k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager k8s.io/client-go/tools/reference +k8s.io/client-go/tools/remotecommand k8s.io/client-go/transport +k8s.io/client-go/transport/spdy k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.60.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 -## explicit; go 1.16 +# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +## explicit; go 1.19 k8s.io/kube-openapi/pkg/builder3/util +k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 -k8s.io/kube-openapi/pkg/internal/handler +k8s.io/kube-openapi/pkg/internal 
+k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 -## explicit; go 1.12 +# k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 +## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net +k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.11.0 -## explicit; go 1.17 +# sigs.k8s.io/controller-runtime v0.15.1 +## explicit; go 1.20 sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil -sigs.k8s.io/controller-runtime/pkg/internal/objectutil +sigs.k8s.io/controller-runtime/pkg/controller/controllerutil sigs.k8s.io/controller-runtime/pkg/log -# sigs.k8s.io/e2e-framework v0.0.6 -## explicit; go 1.17 +# sigs.k8s.io/e2e-framework v0.3.0 +## explicit; go 1.19 sigs.k8s.io/e2e-framework/klient sigs.k8s.io/e2e-framework/klient/conf +sigs.k8s.io/e2e-framework/klient/decoder sigs.k8s.io/e2e-framework/klient/k8s sigs.k8s.io/e2e-framework/klient/k8s/resources +sigs.k8s.io/e2e-framework/klient/k8s/watcher sigs.k8s.io/e2e-framework/klient/wait sigs.k8s.io/e2e-framework/klient/wait/conditions sigs.k8s.io/e2e-framework/pkg/env @@ -952,14 +999,17 @@ sigs.k8s.io/e2e-framework/pkg/envfuncs sigs.k8s.io/e2e-framework/pkg/features sigs.k8s.io/e2e-framework/pkg/flags sigs.k8s.io/e2e-framework/pkg/internal/types +sigs.k8s.io/e2e-framework/support sigs.k8s.io/e2e-framework/support/kind -# sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 -## explicit; go 1.17 +sigs.k8s.io/e2e-framework/support/utils +# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd +## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.2.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath +sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index c92b0eaae..6a1bfb546 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -20,7 +20,9 @@ limitations under the License. package apiutil import ( + "errors" "fmt" + "net/http" "reflect" "sync" @@ -30,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" @@ -59,9 +62,13 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. 
-func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { +func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfig(c) + dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) if err != nil { return nil, err } @@ -72,6 +79,36 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { return restmapper.NewDiscoveryRESTMapper(gr), nil } +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. +func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { // TODO(directxman12): do we want to generalize this to arbitrary container types? @@ -81,7 +118,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // (unstructured, partial, etc) // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds - _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort + _, isPartial := obj.(*metav1.PartialObjectMetadata) _, isPartialList := obj.(*metav1.PartialObjectMetadataList) if isPartial || isPartialList { // we require that the GVK be populated in order to recognize the object @@ -95,6 +132,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi return gvk, nil } + // Use the given scheme to retrieve all the GVKs for the object. gvks, isUnversioned, err := scheme.ObjectKinds(obj) if err != nil { return schema.GroupVersionKind{}, err @@ -103,36 +141,49 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) } - if len(gvks) < 1 { - return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) - } - if len(gvks) > 1 { - // this should only trigger for things like metav1.XYZ -- - // normal versioned types should be fine + switch { + case len(gvks) < 1: + // If the object has no GVK, the object might not have been registered with the scheme. + // or it's not a valid object. 
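For orientation, a minimal sketch of how the reworked apiutil helpers above are consumed together, assuming cfg is an already-loaded *rest.Config; the package and function names are placeholders chosen for the sketch:

package apiutilsketch

import (
	corev1 "k8s.io/api/core/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// podIsNamespaced follows the new call chain: build one *http.Client from the
// rest.Config, hand it to the mapper constructor, then ask for the object's scope.
func podIsNamespaced(cfg *rest.Config) (bool, error) {
	httpClient, err := rest.HTTPClientFor(cfg) // shared transport, as the nil-check above suggests
	if err != nil {
		return false, err
	}
	mapper, err := apiutil.NewDiscoveryRESTMapper(cfg, httpClient)
	if err != nil {
		return false, err
	}
	return apiutil.IsObjectNamespaced(&corev1.Pod{}, clientgoscheme.Scheme, mapper)
}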
+ return schema.GroupVersionKind{}, fmt.Errorf("no GroupVersionKind associated with Go type %T, was the type registered with the Scheme?", obj) + case len(gvks) > 1: + err := fmt.Errorf("multiple GroupVersionKinds associated with Go type %T within the Scheme, this can happen when a type is registered for multiple GVKs at the same time", obj) + + // We've found multiple GVKs for the object. + currentGVK := obj.GetObjectKind().GroupVersionKind() + if !currentGVK.Empty() { + // If the base object has a GVK, check if it's in the list of GVKs before using it. + for _, gvk := range gvks { + if gvk == currentGVK { + return gvk, nil + } + } + + return schema.GroupVersionKind{}, fmt.Errorf( + "%w: the object's supplied GroupVersionKind %q was not found in the Scheme's list; refusing to guess at one: %q", err, currentGVK, gvks) + } + + // This should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine. + // + // See https://github.com/kubernetes-sigs/controller-runtime/issues/362 + // for more information. return schema.GroupVersionKind{}, fmt.Errorf( - "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + "%w: callers can either fix their type registration to only register it once, or specify the GroupVersionKind to use for object passed in; refusing to guess at one: %q", err, gvks) + default: + // In any other case, we've found a single GVK for the object. + return gvks[0], nil } - return gvks[0], nil } // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. -func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) -} - -// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory -// in order to avoid clearing the GVK from the decoded object. -// -// See https://github.com/kubernetes/kubernetes/issues/80609. -type serializerWithDecodedGVK struct { - serializer.WithoutConversionCodecFactory -} - -// DecoderToVersion returns an decoder that does not do conversion. -func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return serializer +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) } // createRestConfig copies the base config and updates needed fields for a new rest config. @@ -159,9 +210,8 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } if isUnstructured { - // If the object is unstructured, we need to preserve the GVK information. - // Use our own custom serializer. - cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + // If the object is unstructured, we use the client-go dynamic serializer. 
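RESTClientForGVK follows the same pattern, taking the *http.Client as its new last argument; a hedged sketch, with the GVK and codec choices picked only for illustration:

package apiutilsketch

import (
	"net/http"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// deploymentRESTClient builds a rest.Interface for apps/v1 Deployments, reusing
// a caller-supplied HTTP client as the new signature requires.
func deploymentRESTClient(cfg *rest.Config, httpClient *http.Client) (rest.Interface, error) {
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	codecs := serializer.NewCodecFactory(clientgoscheme.Scheme)
	// isUnstructured=false keeps the structured serializer path; passing true
	// routes the config through dynamic.ConfigFor instead, per createRestConfig.
	return apiutil.RESTClientForGVK(gvk, false, cfg, codecs, httpClient)
}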
+ cfg = dynamic.ConfigFor(cfg) } else { cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go deleted file mode 100644 index 56a00371f..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiutil - -import ( - "errors" - "sync" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -// dynamicRESTMapper is a RESTMapper that dynamically discovers resource -// types at runtime. -type dynamicRESTMapper struct { - mu sync.RWMutex // protects the following fields - staticMapper meta.RESTMapper - limiter *rate.Limiter - newMapper func() (meta.RESTMapper, error) - - lazy bool - // Used for lazy init. - initOnce sync.Once -} - -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. -type DynamicRESTMapperOption func(*dynamicRESTMapper) error - -// WithLimiter sets the RESTMapper's underlying limiter to lim. -func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.limiter = lim - return nil - } -} - -// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings -// until an API call is made. -var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.lazy = true - return nil -} - -// WithCustomMapper supports setting a custom RESTMapper refresher instead of -// the default method, which uses a discovery client. -// -// This exists mainly for testing, but can be useful if you need tighter control -// over how discovery is performed, which discovery endpoints are queried, etc. -func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.newMapper = newMapper - return nil - } -} - -// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. opts -// configure the RESTMapper. 
-func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { - client, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - drm := &dynamicRESTMapper{ - limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - newMapper: func() (meta.RESTMapper, error) { - groupResources, err := restmapper.GetAPIGroupResources(client) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(groupResources), nil - }, - } - for _, opt := range opts { - if err = opt(drm); err != nil { - return nil, err - } - } - if !drm.lazy { - if err := drm.setStaticMapper(); err != nil { - return nil, err - } - } - return drm, nil -} - -var ( - // defaultRefilRate is the default rate at which potential calls are - // added back to the "bucket" of allowed calls. - defaultRefillRate = 5 - // defaultLimitSize is the default starting/max number of potential calls - // per second. Once a call is used, it's added back to the bucket at a rate - // of defaultRefillRate per second. - defaultLimitSize = 5 -) - -// setStaticMapper sets drm's staticMapper by querying its client, regardless -// of reload backoff. -func (drm *dynamicRESTMapper) setStaticMapper() error { - newMapper, err := drm.newMapper() - if err != nil { - return err - } - drm.staticMapper = newMapper - return nil -} - -// init initializes drm only once if drm is lazy. -func (drm *dynamicRESTMapper) init() (err error) { - drm.initOnce.Do(func() { - if drm.lazy { - err = drm.setStaticMapper() - } - }) - return err -} - -// checkAndReload attempts to call the given callback, which is assumed to be dependent -// on the data in the restmapper. -// -// If the callback returns an error that matches the given error, it will attempt to reload -// the RESTMapper's data and re-call the callback once that's occurred. -// If the callback returns any other error, the function will return immediately regardless. -// -// It will take care of ensuring that reloads are rate-limited and that extraneous calls -// aren't made. If a reload would exceed the limiters rate, it returns the error return by -// the callback. -// It's thread-safe, and worries about thread-safety for the callback (so the callback does -// not need to attempt to lock the restmapper). -func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { - // first, check the common path -- data is fresh enough - // (use an IIFE for the lock's defer) - err := func() error { - drm.mu.RLock() - defer drm.mu.RUnlock() - - return checkNeedsReload() - }() - - // NB(directxman12): `Is` and `As` have a confusing relationship -- - // `Is` is like `== or does this implement .Is`, whereas `As` says - // `can I type-assert into` - needsReload := errors.As(err, &needsReloadErr) - if !needsReload { - return err - } - - // if the data wasn't fresh, we'll need to try and update it, so grab the lock... - drm.mu.Lock() - defer drm.mu.Unlock() - - // ... and double-check that we didn't reload in the meantime - err = checkNeedsReload() - needsReload = errors.As(err, &needsReloadErr) - if !needsReload { - return err - } - - // we're still stale, so grab a rate-limit token if we can... - if !drm.limiter.Allow() { - // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) - // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError - return err - } - - // ...reload... 
- if err := drm.setStaticMapper(); err != nil { - return err - } - - // ...and return the results of the closure regardless - return checkNeedsReload() -} - -// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. - -func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionKind{}, err - } - var gvk schema.GroupVersionKind - err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { - var err error - gvk, err = drm.staticMapper.KindFor(resource) - return err - }) - return gvk, err -} - -func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvks []schema.GroupVersionKind - err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { - var err error - gvks, err = drm.staticMapper.KindsFor(resource) - return err - }) - return gvks, err -} - -func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionResource{}, err - } - - var gvr schema.GroupVersionResource - err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { - var err error - gvr, err = drm.staticMapper.ResourceFor(input) - return err - }) - return gvr, err -} - -func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvrs []schema.GroupVersionResource - err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { - var err error - gvrs, err = drm.staticMapper.ResourcesFor(input) - return err - }) - return gvrs, err -} - -func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mapping *meta.RESTMapping - err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { - var err error - mapping, err = drm.staticMapper.RESTMapping(gk, versions...) - return err - }) - return mapping, err -} - -func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mappings []*meta.RESTMapping - err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { - var err error - mappings, err = drm.staticMapper.RESTMappings(gk, versions...) - return err - }) - return mappings, err -} - -func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { - if err := drm.init(); err != nil { - return "", err - } - var singular string - err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { - var err error - singular, err = drm.staticMapper.ResourceSingularizer(resource) - return err - }) - return singular, err -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go new file mode 100644 index 000000000..e0ff72dc1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -0,0 +1,293 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "net/http" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +type mapper struct { + mapper meta.RESTMapper + client *discovery.DiscoveryClient + knownGroups map[string]*restmapper.APIGroupResources + apiGroups map[string]*metav1.APIGroup + + // mutex to provide thread-safe mapper reloading. + mu sync.RWMutex +} + +// KindFor implements Mapper.KindFor. +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err + } + res, err = m.getMapper().KindFor(resource) + } + + return res, err +} + +// KindsFor implements Mapper.KindsFor. +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err + } + res, err = m.getMapper().KindsFor(resource) + } + + return res, err +} + +// ResourceFor implements Mapper.ResourceFor. +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err + } + res, err = m.getMapper().ResourceFor(input) + } + + return res, err +} + +// ResourcesFor implements Mapper.ResourcesFor. +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err + } + res, err = m.getMapper().ResourcesFor(input) + } + + return res, err +} + +// RESTMapping implements Mapper.RESTMapping. 
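A short usage sketch for the replacement lazy mapper, again with placeholder names; the GroupKind is only an example:

package apiutilsketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// deploymentMapping exercises the lazy mapper: the first lookup for an unknown
// group triggers discovery for that group only, and later lookups hit the cache.
func deploymentMapping(cfg *rest.Config) (schema.GroupVersionResource, error) {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	m, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	fmt.Println(m.Scope.Name()) // "namespace" for Deployments
	return m.Resource, nil      // group "apps", version "v1", resource "deployments"
}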
+func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMapping(gk, versions...) + } + + return res, err +} + +// RESTMappings implements Mapper.RESTMappings. +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMappings(gk, versions...) + } + + return res, err +} + +// ResourceSingularizer implements Mapper.ResourceSingularizer. +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper +} + +// addKnownGroupAndReload reloads the mapper with updated information about missing API group. +// versions can be specified for partial updates, for instance for v1beta1 version only. +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } + + // If no specific versions are set by user, we will scan all available ones for the API group. + // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls + // this data will be taken from cache. + if len(versions) == 0 { + apiGroup, err := m.findAPIGroupByName(groupName) + if err != nil { + return err + } + for _, version := range apiGroup.Versions { + versions = append(versions, version.Version) + } + } + + m.mu.Lock() + defer m.mu.Unlock() + + // Create or fetch group resources from cache. + groupResources := &restmapper.APIGroupResources{ + Group: metav1.APIGroup{Name: groupName}, + VersionedResources: make(map[string][]metav1.APIResource), + } + if _, ok := m.knownGroups[groupName]; ok { + groupResources = m.knownGroups[groupName] + } + + // Update information for group resources about versioned resources. + // The number of API calls is equal to the number of versions: /apis//. + groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...) + if err != nil { + return fmt.Errorf("failed to get API group resources: %w", err) + } + for version, resources := range groupVersionResources { + groupResources.VersionedResources[version.Version] = resources.APIResources + } + + // Update information for group resources about the API group by adding new versions. + // Ignore the versions that are already registered. + for _, version := range versions { + found := false + for _, v := range groupResources.Group.Versions { + if v.Version == version { + found = true + break + } + } + + if !found { + groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(), + Version: version, + }) + } + } + + // Update data in the cache. + m.knownGroups[groupName] = groupResources + + // Finally, update the group with received information and regenerate the mapper. 
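If it helps to see the request pattern that the comments in addKnownGroupAndReload describe, the equivalent raw discovery calls would look roughly like this; a sketch only, using the same client-go discovery API the mapper wraps, with "apps/v1" as an arbitrary example:

package apiutilsketch

import (
	"net/http"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

// rawDiscovery mirrors what the lazy mapper does under the hood: one ServerGroups
// call (GET /api and /apis) when no versions are supplied, then one
// ServerResourcesForGroupVersion call per version (GET /apis/<group>/<version>).
func rawDiscovery(cfg *rest.Config, httpClient *http.Client) error {
	dc, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient)
	if err != nil {
		return err
	}
	if _, err := dc.ServerGroups(); err != nil {
		return err
	}
	_, err = dc.ServerResourcesForGroupVersion("apps/v1")
	return err
}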
+ updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) + for _, agr := range m.knownGroups { + updatedGroupResources = append(updatedGroupResources, agr) + } + + m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) + return nil +} + +// findAPIGroupByNameLocked returns API group by its name. +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { + // Looking in the cache first. + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil + } + } + + // Update the cache if nothing was found. + apiGroups, err := m.client.ServerGroups() + if err != nil { + return nil, fmt.Errorf("failed to get server groups: %w", err) + } + if len(apiGroups.Groups) == 0 { + return nil, fmt.Errorf("received an empty API groups list") + } + + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() + + // Looking in the cache again. + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil + } + } + + // If there is still nothing, return an error. + return nil, fmt.Errorf("failed to find API group %q", groupName) +} + +// fetchGroupVersionResources fetches the resources for the specified group and its versions. +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + for _, version := range versions { + groupVersion := schema.GroupVersion{Group: groupName, Version: version} + + apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) + if err != nil { + failedGroups[groupVersion] = err + } + if apiResourceList != nil { + // even in case of error, some fallback might have been returned. + groupVersionResources[groupVersion] = apiResourceList + } + } + + if len(failedGroups) > 0 { + return nil, &discovery.ErrGroupDiscoveryFailed{Groups: failedGroups} + } + + return groupVersionResources, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index bbe36c467..0d8b9fbe1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -18,12 +18,13 @@ package client import ( "context" + "errors" "fmt" + "net/http" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -35,6 +36,28 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Options are creation options for a Client. +type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // WarningHandler is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. 
+ WarningHandler WarningHandlerOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool +} + // WarningHandlerOptions are options for configuring a // warning handler for the client which is responsible // for surfacing API Server warnings. @@ -45,23 +68,25 @@ type WarningHandlerOptions struct { // AllowDuplicateLogs does not deduplicate the to-be // logged surfaced warnings messages. See // log.WarningHandlerOptions for considerations - // regarding deuplication + // regarding deduplication AllowDuplicateLogs bool } -// Options are creation options for a Client. -type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper - - // Opts is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - Opts WarningHandlerOptions +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should not be read from the cache. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. + Unstructured bool } +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. // The returned client reads *and* writes directly from the server // (it doesn't use object caches). It understands how to work with @@ -72,8 +97,12 @@ type Options struct { // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. -func New(config *rest.Config, options Options) (Client, error) { - return newClient(config, options) +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err } func newClient(config *rest.Config, options Options) (*client, error) { @@ -81,23 +110,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } - if !options.Opts.SuppressWarnings { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") // Set a WarningHandler, the default WarningHandler // is log.KubeAPIWarningLogger with deduplication enabled. // See log.KubeAPIWarningLoggerOptions for considerations // regarding deduplication. 
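A sketch of how a caller wires the new client options together; cacheReader stands in for any client.Reader (typically a started cache) and, like the function name, is a placeholder for this sketch:

package clientsketch

import (
	corev1 "k8s.io/api/core/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newCachedClient serves reads from cacheReader except for Secrets, which always
// go straight to the API server; Scheme is optional and defaults to scheme.Scheme.
func newCachedClient(cfg *rest.Config, cacheReader client.Reader) (client.Client, error) {
	return client.New(cfg, client.Options{
		Scheme: clientgoscheme.Scheme,
		Cache: &client.CacheOptions{
			Reader:     cacheReader,
			DisableFor: []client.Object{&corev1.Secret{}},
		},
	})
}

Setting Options.DryRun to a non-nil true value makes New wrap the result in NewDryRunClient, and Options.HTTPClient can carry the shared transport obtained from rest.HTTPClientFor.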
- rest.SetDefaultWarningHandler( - log.NewKubeAPIWarningLogger( - logger, - log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.Opts.AllowDuplicateLogs, - }, - ), + config.WarningHandler = log.NewKubeAPIWarningLogger( + logger, + log.KubeAPIWarningLoggerOptions{ + Deduplicate: !options.WarningHandler.AllowDuplicateLogs, + }, ) } + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -106,34 +147,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfig(config) + rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientcache, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - cache: clientcache, + resources: resources, paramCodec: noConversionParamCodec{}, }, metadataClient: metadataClient{ @@ -143,20 +185,65 @@ func newClient(config *rest.Config, options Options) (*client, error) { scheme: options.Scheme, mapper: options.Mapper, } + if options.Cache == nil || options.Cache.Reader == nil { + return c, nil + } + + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client that reads and writes directly from/to an API server. +// It lazily initializes new clients at the time they are used. 
type client struct { typedClient typedClient unstructuredClient unstructuredClient metadataClient metadataClient scheme *runtime.Scheme mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool +} + +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -168,6 +255,16 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + // Scheme returns the scheme this client is using. func (c *client) Scheme() *runtime.Scheme { return c.scheme @@ -181,7 +278,7 @@ func (c *client) RESTMapper() meta.RESTMapper { // Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot create using only metadata") @@ -194,7 +291,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") @@ -206,7 +303,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e // Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Delete(ctx, obj, opts...) @@ -218,7 +315,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e // DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.DeleteAllOf(ctx, obj, opts...) 
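The two introspection helpers added above are, in this controller-runtime release, also reachable through the Client interface; a small sketch with a typed object:

package clientsketch

import (
	appsv1 "k8s.io/api/apps/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// describeDeployment reports the GVK the client resolves for a Deployment and
// whether that kind is namespace scoped, using the new helper methods.
func describeDeployment(c client.Client) (string, bool, error) {
	obj := &appsv1.Deployment{}
	gvk, err := c.GroupVersionKindFor(obj)
	if err != nil {
		return "", false, err
	}
	namespaced, err := c.IsObjectNamespaced(obj)
	if err != nil {
		return "", false, err
	}
	return gvk.String(), namespaced, nil
}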
@@ -231,7 +328,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Patch(ctx, obj, patch, opts...) @@ -241,23 +338,35 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat } // Get implements client.Client. -func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.Get(ctx, key, obj, opts...) + } + switch obj.(type) { - case *unstructured.Unstructured: - return c.unstructuredClient.Get(ctx, key, obj) + case runtime.Unstructured: + return c.unstructuredClient.Get(ctx, key, obj, opts...) case *metav1.PartialObjectMetadata: // Metadata only object should always preserve the GVK coming in from the caller. defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - return c.metadataClient.Get(ctx, key, obj) + return c.metadataClient.Get(ctx, key, obj, opts...) default: - return c.typedClient.Get(ctx, key, obj) + return c.typedClient.Get(ctx, key, obj, opts...) } } // List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.List(ctx, obj, opts...) + } + switch x := obj.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: // Metadata only object should always preserve the GVK. @@ -289,40 +398,194 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e } // Status implements client.StatusClient. -func (c *client) Status() StatusWriter { - return &statusWriter{client: c} +func (c *client) Status() SubResourceWriter { + return c.SubResource("status") +} + +func (c *client) SubResource(subResource string) SubResourceClient { + return &subResourceClient{client: c, subResource: subResource} +} + +// subResourceClient is client.SubResourceWriter that writes to subresources. +type subResourceClient struct { + client *client + subResource string +} + +// ensure subResourceClient implements client.SubResourceClient. +var _ SubResourceClient = &subResourceClient{} + +// SubResourceGetOptions holds all the possible configuration +// for a subresource Get request. +type SubResourceGetOptions struct { + Raw *metav1.GetOptions +} + +// ApplyToSubResourceGet updates the configuaration to the given get options. +func (getOpt *SubResourceGetOptions) ApplyToSubResourceGet(o *SubResourceGetOptions) { + if getOpt.Raw != nil { + o.Raw = getOpt.Raw + } +} + +// ApplyOptions applues the given options. +func (getOpt *SubResourceGetOptions) ApplyOptions(opts []SubResourceGetOption) *SubResourceGetOptions { + for _, o := range opts { + o.ApplyToSubResourceGet(getOpt) + } + + return getOpt +} + +// AsGetOptions returns the configured options as *metav1.GetOptions. 
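Read routing with the cache-backed client, continuing the hypothetical configuration from the cache sketch above; the object names are made up:

package clientsketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// readPodAndSecret shows the routing in Get above: with the cache options from
// the earlier sketch, the Pod is answered by the cache reader, while the Secret
// is in the uncached set and bypasses the cache.
func readPodAndSecret(ctx context.Context, c client.Client) error {
	pod := &corev1.Pod{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "my-pod"}, pod); err != nil {
		return err
	}
	secret := &corev1.Secret{}
	return c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "my-secret"}, secret)
}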
+func (getOpt *SubResourceGetOptions) AsGetOptions() *metav1.GetOptions { + if getOpt.Raw == nil { + return &metav1.GetOptions{} + } + return getOpt.Raw +} + +// SubResourceUpdateOptions holds all the possible configuration +// for a subresource update request. +type SubResourceUpdateOptions struct { + UpdateOptions + SubResourceBody Object +} + +// ApplyToSubResourceUpdate updates the configuration on the given create options +func (uo *SubResourceUpdateOptions) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) { + uo.UpdateOptions.ApplyToUpdate(&o.UpdateOptions) + if uo.SubResourceBody != nil { + o.SubResourceBody = uo.SubResourceBody + } +} + +// ApplyOptions applies the given options. +func (uo *SubResourceUpdateOptions) ApplyOptions(opts []SubResourceUpdateOption) *SubResourceUpdateOptions { + for _, o := range opts { + o.ApplyToSubResourceUpdate(uo) + } + + return uo +} + +// SubResourceUpdateAndPatchOption is an option that can be used for either +// a subresource update or patch request. +type SubResourceUpdateAndPatchOption interface { + SubResourceUpdateOption + SubResourcePatchOption +} + +// WithSubResourceBody returns an option that uses the given body +// for a subresource Update or Patch operation. +func WithSubResourceBody(body Object) SubResourceUpdateAndPatchOption { + return &withSubresourceBody{body: body} +} + +type withSubresourceBody struct { + body Object +} + +func (wsr *withSubresourceBody) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) { + o.SubResourceBody = wsr.body } -// statusWriter is client.StatusWriter that writes status subresource. -type statusWriter struct { - client *client +func (wsr *withSubresourceBody) ApplyToSubResourcePatch(o *SubResourcePatchOptions) { + o.SubResourceBody = wsr.body } -// ensure statusWriter implements client.StatusWriter. -var _ StatusWriter = &statusWriter{} +// SubResourceCreateOptions are all the possible configurations for a subresource +// create request. +type SubResourceCreateOptions struct { + CreateOptions +} + +// ApplyOptions applies the given options. +func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption) *SubResourceCreateOptions { + for _, o := range opts { + o.ApplyToSubResourceCreate(co) + } + + return co +} + +// ApplyToSubresourceCreate applies the the configuration on the given create options. +func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) { + co.CreateOptions.ApplyToCreate(&co.CreateOptions) +} + +// SubResourcePatchOptions holds all possible configurations for a subresource patch +// request. +type SubResourcePatchOptions struct { + PatchOptions + SubResourceBody Object +} + +// ApplyOptions applies the given options. +func (po *SubResourcePatchOptions) ApplyOptions(opts []SubResourcePatchOption) *SubResourcePatchOptions { + for _, o := range opts { + o.ApplyToSubResourcePatch(po) + } + + return po +} + +// ApplyToSubResourcePatch applies the configuration on the given patch options. +func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOptions) { + po.PatchOptions.ApplyToPatch(&o.PatchOptions) + if po.SubResourceBody != nil { + o.SubResourceBody = po.SubResourceBody + } +} -// Update implements client.StatusWriter. 
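With statusWriter replaced by the generic subresource client, status writes and other subresources go through the same interface; a sketch, where WithSubResourceBody could additionally be passed to Update or Patch to send a separate body object:

package clientsketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// updateStatusAndReadScale uses the subresource client both ways: Status() is
// now shorthand for SubResource("status"), and any other named subresource
// (here "scale") is reached through the same calls.
func updateStatusAndReadScale(ctx context.Context, c client.Client, deploy *appsv1.Deployment) error {
	if err := c.Status().Update(ctx, deploy); err != nil {
		return err
	}
	scale := &autoscalingv1.Scale{}
	return c.SubResource("scale").Get(ctx, deploy, scale)
}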
-func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) +func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { switch obj.(type) { - case *unstructured.Unstructured: - return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case runtime.Unstructured: + return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) + case *metav1.PartialObjectMetadata: + return errors.New("can not get subresource using only metadata") + default: + return sc.client.typedClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) + } +} + +// Create implements client.SubResourceClient +func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) + + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") default: - return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) + return sc.client.typedClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) } } -// Patch implements client.Client. -func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) +// Update implements client.SubResourceClient +func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sc.client.typedClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) + } +} + +// Patch implements client.SubResourceWriter. +func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: - return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case runtime.Unstructured: + return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) case *metav1.PartialObjectMetadata: - return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) default: - return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) + return sc.client.typedClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) 
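// Illustrative sketch (not part of the patch above): how callers use the new
// subresource client. Status() is now just SubResource("status"), and other
// subresources take their body via WithSubResourceBody. Assumes an initialized
// client.Client `c`, a context `ctx`, and appsv1/autoscalingv1 imports; the
// Deployment `dep` is a placeholder.
func scaleAndUpdateStatus(ctx context.Context, c client.Client, dep *appsv1.Deployment) error {
	// Status writes go through the generic subresource machinery shown above.
	dep.Status.ObservedGeneration = dep.Generation
	if err := c.Status().Update(ctx, dep); err != nil {
		return err
	}

	// Arbitrary subresources are addressed by name; the scale object is the
	// subresource body, while the Deployment identifies the parent resource.
	scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 3}}
	return c.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale))
}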
} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go similarity index 82% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index 857a0b38a..2d0787952 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -17,12 +17,12 @@ limitations under the License. package client import ( + "net/http" "strings" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -30,8 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types. -type clientCache struct { +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + // config is the rest.Config to talk to an apiserver config *rest.Config @@ -44,22 +47,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType caches structured type metadata + // structuredResourceByType stores structured type metadata structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType caches unstructured type metadata + // unstructuredResourceByType stores unstructured type metadata unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. -func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { +func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient) if err != nil { return nil, err } @@ -72,15 +75,13 @@ func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstruc // getResource returns the resource meta information for the given type of object. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { +func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return nil, err } - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList + _, isUnstructured := obj.(runtime.Unstructured) // It's better to do creation work twice than to not let multiple // people make requests at once @@ -108,7 +109,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { } // getObjMeta returns objMeta containing both type and object metadata and state. -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { +func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { return &objMeta{resourceMeta: r, Object: m}, err } -// resourceMeta caches state for a Kubernetes type. +// resourceMeta stores state for a Kubernetes type. type resourceMeta struct { // client is the rest client used to talk to the apiserver rest.Interface diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index 2965e5fa9..b2e202494 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -17,7 +17,7 @@ limitations under the License. // Package client contains functionality for interacting with Kubernetes API // servers. // -// Clients +// # Clients // // Clients are split into two interfaces -- Readers and Writers. Readers // get and list, while writers create, update, and delete. @@ -25,18 +25,18 @@ limitations under the License. // The New function can be used to create a new client that talks directly // to the API server. // -// A common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// It is a common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the creating the Client with a Cache. // -// Options +// # Options // // Many client operations in Kubernetes support options. These options are // represented as variadic arguments at the end of a given method call. // For instance, to use a label selector on list, you can call -// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) // -// Indexing +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// # Indexing // // Indexes may be added to caches using a FieldIndexer. This allows you to easily // and efficiently look up objects with certain properties. 
You can then make diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index ea25ea253..bbcdd3832 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewDryRunClient wraps an existing client and enforces DryRun mode @@ -46,6 +47,16 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) @@ -72,8 +83,8 @@ func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts } // Get implements client.Client. -func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { - return c.client.Get(ctx, key, obj) +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + return c.client.Get(ctx, key, obj, opts...) } // List implements client.Client. @@ -82,25 +93,38 @@ func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOpt } // Status implements client.StatusClient. -func (c *dryRunClient) Status() StatusWriter { - return &dryRunStatusWriter{client: c.client.Status()} +func (c *dryRunClient) Status() SubResourceWriter { + return c.SubResource("status") +} + +// SubResource implements client.SubResourceClient. +func (c *dryRunClient) SubResource(subResource string) SubResourceClient { + return &dryRunSubResourceClient{client: c.client.SubResource(subResource)} } -// ensure dryRunStatusWriter implements client.StatusWriter. -var _ StatusWriter = &dryRunStatusWriter{} +// ensure dryRunSubResourceWriter implements client.SubResourceWriter. +var _ SubResourceWriter = &dryRunSubResourceClient{} -// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// dryRunSubResourceClient is client.SubResourceWriter that writes status subresource with dryRun mode // enforced. -type dryRunStatusWriter struct { - client StatusWriter +type dryRunSubResourceClient struct { + client SubResourceClient +} + +func (sw *dryRunSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { + return sw.client.Get(ctx, obj, subResource, opts...) +} + +func (sw *dryRunSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { + return sw.client.Create(ctx, obj, subResource, append(opts, DryRunAll)...) } -// Update implements client.StatusWriter. -func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { +// Update implements client.SubResourceWriter. 
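// Illustrative sketch (not part of the patch): NewDryRunClient wraps any
// client.Client and appends metav1.DryRunAll to every write, now including
// the subresource writes handled by dryRunSubResourceClient below. Assumes an
// initialized client.Client `c`; the object argument is a placeholder.
func validateCreate(ctx context.Context, c client.Client, obj client.Object) error {
	dry := client.NewDryRunClient(c)
	// The API server runs admission and validation but persists nothing;
	// Status() and SubResource(...) obtained from `dry` behave the same way.
	return dry.Create(ctx, obj)
}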
+func (sw *dryRunSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.StatusWriter. -func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { +// Patch implements client.SubResourceWriter. +func (sw *dryRunSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 58c2ece15..0ddda3163 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -20,6 +20,7 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -50,7 +51,7 @@ type Reader interface { // Get retrieves an obj for the given object key from the Kubernetes Cluster. // obj must be a struct pointer so that obj can be updated with the response // returned by the Server. - Get(ctx context.Context, key ObjectKey, obj Object) error + Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error // List retrieves list of objects for a given namespace and list options. On a // successful call, Items field in the list will be populated with the @@ -60,7 +61,8 @@ type Reader interface { // Writer knows how to create, delete, and update Kubernetes objects. type Writer interface { - // Create saves the object obj in the Kubernetes cluster. + // Create saves the object obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. Create(ctx context.Context, obj Object, opts ...CreateOption) error // Delete deletes the given obj from Kubernetes cluster. @@ -81,20 +83,80 @@ type Writer interface { // StatusClient knows how to create a client which can update status subresource // for kubernetes objects. type StatusClient interface { - Status() StatusWriter + Status() SubResourceWriter } -// StatusWriter knows how to update status subresource of a Kubernetes object. -type StatusWriter interface { +// SubResourceClientConstructor knows how to create a client which can update subresource +// for kubernetes objects. +type SubResourceClientConstructor interface { + // SubResourceClientConstructor returns a subresource client for the named subResource. 
Known + // upstream subResources usages are: + // - ServiceAccount token creation: + // sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // token := &authenticationv1.TokenRequest{} + // c.SubResourceClient("token").Create(ctx, sa, token) + // + // - Pod eviction creation: + // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // c.SubResourceClient("eviction").Create(ctx, pod, &policyv1.Eviction{}) + // + // - Pod binding creation: + // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // binding := &corev1.Binding{Target: corev1.ObjectReference{Name: "my-node"}} + // c.SubResourceClient("binding").Create(ctx, pod, binding) + // + // - CertificateSigningRequest approval: + // csr := &certificatesv1.CertificateSigningRequest{ + // ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}, + // Status: certificatesv1.CertificateSigningRequestStatus{ + // Conditions: []certificatesv1.[]CertificateSigningRequestCondition{{ + // Type: certificatesv1.CertificateApproved, + // Status: corev1.ConditionTrue, + // }}, + // }, + // } + // c.SubResourceClient("approval").Update(ctx, csr) + // + // - Scale retrieval: + // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // scale := &autoscalingv1.Scale{} + // c.SubResourceClient("scale").Get(ctx, dep, scale) + // + // - Scale update: + // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}} + // c.SubResourceClient("scale").Update(ctx, dep, client.WithSubResourceBody(scale)) + SubResource(subResource string) SubResourceClient +} + +// StatusWriter is kept for backward compatibility. +type StatusWriter = SubResourceWriter + +// SubResourceReader knows how to read SubResources +type SubResourceReader interface { + Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error +} + +// SubResourceWriter knows how to update subresource of a Kubernetes object. +type SubResourceWriter interface { + // Create saves the subResource object in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. - Update(ctx context.Context, obj Object, opts ...UpdateOption) error + Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error // Patch patches the given object's subresource. obj must be a struct // pointer so that obj can be updated with the content returned by the // Server. - Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error +} + +// SubResourceClient knows how to perform CRU operations on Kubernetes objects. +type SubResourceClient interface { + SubResourceReader + SubResourceWriter } // Client knows how to perform CRUD operations on Kubernetes objects. @@ -102,11 +164,16 @@ type Client interface { Reader Writer StatusClient + SubResourceClientConstructor // Scheme returns the scheme this client is using. 
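// Illustrative sketch (not part of the patch): a compilable version of the
// ServiceAccount token example from the SubResourceClientConstructor comment
// above. Note the constructor method itself is SubResource(name), returning a
// SubResourceClient. The namespace/name values are placeholders; assumes
// corev1, authenticationv1 and metav1 imports and an initialized client `c`.
func requestToken(ctx context.Context, c client.Client) (string, error) {
	sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "builder"}}
	tr := &authenticationv1.TokenRequest{}
	if err := c.SubResource("token").Create(ctx, sa, tr); err != nil {
		return "", err
	}
	return tr.Status.Token, nil
}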
Scheme() *runtime.Scheme // RESTMapper returns the rest this client is using. RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) } // WithWatch supports Watch on top of the CRUD operations supported by @@ -143,3 +210,13 @@ func IgnoreNotFound(err error) error { } return err } + +// IgnoreAlreadyExists returns nil on AlreadyExists errors. +// All other values that are not AlreadyExists errors or nil are returned unmodified. +func IgnoreAlreadyExists(err error) error { + if apierrors.IsAlreadyExists(err) { + return nil + } + + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go index 59747463a..d0c6b8e13 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -116,7 +116,7 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op } // Get implements client.Client. -func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { return fmt.Errorf("metadata client did not understand object: %T", obj) @@ -124,12 +124,15 @@ func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) er gvk := metadata.GroupVersionKind() + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + resInt, err := mc.getResourceInterface(gvk, key.Namespace) if err != nil { return err } - res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions()) if err != nil { return err } @@ -146,9 +149,7 @@ func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...List } gvk := metadata.GroupVersionKind() - if strings.HasSuffix(gvk.Kind, "List") { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") listOpts := ListOptions{} listOpts.ApplyOptions(opts) @@ -167,7 +168,7 @@ func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...List return nil } -func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { +func (mc *metadataClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { return fmt.Errorf("metadata client did not understand object: %T", obj) @@ -179,16 +180,24 @@ func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Pat return err } - data, err := patch.Data(obj) + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) if err != nil { return err } - patchOpts := &PatchOptions{} - res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), subResource) if err != nil { return err } + 
*metadata = *res metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata return nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 557598727..222dc7957 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewNamespacedClient wraps an existing client enforcing the namespace value. @@ -52,11 +52,21 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { return n.client.RESTMapper() } -// Create implements clinet.Client. +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + +// Create implements client.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -72,9 +82,9 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat // Update implements client.Client. func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -90,9 +100,9 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat // Delete implements client.Client. func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -108,9 +118,9 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet // DeleteAllOf implements client.Client. 
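// Illustrative sketch (not part of the patch): IgnoreAlreadyExists, added in
// interfaces.go above, mirrors IgnoreNotFound and keeps create paths
// idempotent. Assumes corev1 is imported and `c` is an initialized client.
func ensureConfigMap(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	// An AlreadyExists error means the desired object is present, which is
	// fine for a reconcile loop, so it is swallowed; other errors surface.
	return client.IgnoreAlreadyExists(c.Create(ctx, cm))
}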
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } if isNamespaceScoped { @@ -121,9 +131,9 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... // Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -138,18 +148,18 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o } // Get implements client.Client. -func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } if isNamespaceScoped { if key.Namespace != "" && key.Namespace != n.namespace { - return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace) } key.Namespace = n.namespace } - return n.client.Get(ctx, key, obj) + return n.client.Get(ctx, key, obj, opts...) } // List implements client.Client. @@ -161,25 +171,46 @@ func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...Lis } // Status implements client.StatusClient. -func (n *namespacedClient) Status() StatusWriter { - return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +func (n *namespacedClient) Status() SubResourceWriter { + return n.SubResource("status") } -// ensure namespacedClientStatusWriter implements client.StatusWriter. -var _ StatusWriter = &namespacedClientStatusWriter{} +// SubResource implements client.SubResourceClient. +func (n *namespacedClient) SubResource(subResource string) SubResourceClient { + return &namespacedClientSubResourceClient{client: n.client.SubResource(subResource), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientSubResourceClient implements client.SubResourceClient. +var _ SubResourceClient = &namespacedClientSubResourceClient{} -type namespacedClientStatusWriter struct { - StatusClient StatusWriter +type namespacedClientSubResourceClient struct { + client SubResourceClient namespace string namespacedclient Client } -// Update implements client.StatusWriter. 
-func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) +func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + + return nsw.client.Get(ctx, obj, subResource, opts...) +} +func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -190,15 +221,33 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, if isNamespaceScoped && objectNamespace == "" { obj.SetNamespace(nsw.namespace) } - return nsw.StatusClient.Update(ctx, obj, opts...) + + return nsw.client.Create(ctx, obj, subResource, opts...) } -// Patch implements client.StatusWriter. -func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) +// Update implements client.SubResourceWriter. +func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.client.Update(ctx, obj, opts...) +} +// Patch implements client.SubResourceWriter. +func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { - return fmt.Errorf("error finding the scope of the object: %v", err) + return fmt.Errorf("error finding the scope of the object: %w", err) } objectNamespace := obj.GetNamespace() @@ -209,5 +258,5 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, if isNamespaceScoped && objectNamespace == "" { obj.SetNamespace(nsw.namespace) } - return nsw.StatusClient.Patch(ctx, obj, patch, opts...) + return nsw.client.Patch(ctx, obj, patch, opts...) 
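// Illustrative sketch (not part of the patch): NewNamespacedClient pins a
// namespace onto namespace-scoped objects, and the subresource client above
// now applies the same namespace checks. The namespace "team-a" and object
// names are placeholders; assumes appsv1 is imported.
func getPinned(ctx context.Context, c client.Client) (*appsv1.Deployment, error) {
	nsClient := client.NewNamespacedClient(c, "team-a")
	dep := &appsv1.Deployment{}
	// No namespace in the key: the wrapper fills in "team-a"; a conflicting
	// namespace would be rejected with the error shown above.
	err := nsClient.Get(ctx, client.ObjectKey{Name: "web"}, dep)
	return dep, err
}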
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index aa2299eac..d81bf25de 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -37,6 +37,12 @@ type DeleteOption interface { ApplyToDelete(*DeleteOptions) } +// GetOption is some configuration that modifies options for a get request. +type GetOption interface { + // ApplyToGet applies this configuration to the given get options. + ApplyToGet(*GetOptions) +} + // ListOption is some configuration that modifies options for a list request. type ListOption interface { // ApplyToList applies this configuration to the given list options. @@ -61,6 +67,29 @@ type DeleteAllOfOption interface { ApplyToDeleteAllOf(*DeleteAllOfOptions) } +// SubResourceGetOption modifies options for a SubResource Get request. +type SubResourceGetOption interface { + ApplyToSubResourceGet(*SubResourceGetOptions) +} + +// SubResourceUpdateOption is some configuration that modifies options for a update request. +type SubResourceUpdateOption interface { + // ApplyToSubResourceUpdate applies this configuration to the given update options. + ApplyToSubResourceUpdate(*SubResourceUpdateOptions) +} + +// SubResourceCreateOption is some configuration that modifies options for a create request. +type SubResourceCreateOption interface { + // ApplyToSubResourceCreate applies this configuration to the given create options. + ApplyToSubResourceCreate(*SubResourceCreateOptions) +} + +// SubResourcePatchOption configures a subresource patch request. +type SubResourcePatchOption interface { + // ApplyToSubResourcePatch applies the configuration on the given patch options. + ApplyToSubResourcePatch(*SubResourcePatchOptions) +} + // }}} // {{{ Multi-Type Options @@ -90,10 +119,23 @@ func (dryRunAll) ApplyToPatch(opts *PatchOptions) { func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { opts.DryRun = []string{metav1.DryRunAll} } + func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { opts.DryRun = []string{metav1.DryRunAll} } +func (dryRunAll) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + // FieldOwner set the field manager name for the given server-side apply patch. type FieldOwner string @@ -112,6 +154,21 @@ func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { opts.FieldManager = string(f) } +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldManager = string(f) +} + // }}} // {{{ Create Options @@ -311,6 +368,45 @@ func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { // }}} +// {{{ Get Options + +// GetOptions contains options for get operation. 
+// Now it only has a Raw field, with support for specific resourceVersion. +type GetOptions struct { + // Raw represents raw GetOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface. + Raw *metav1.GetOptions +} + +var _ GetOption = &GetOptions{} + +// ApplyToGet implements GetOption for GetOptions. +func (o *GetOptions) ApplyToGet(lo *GetOptions) { + if o.Raw != nil { + lo.Raw = o.Raw + } +} + +// AsGetOptions returns these options as a flattened metav1.GetOptions. +// This may mutate the Raw field. +func (o *GetOptions) AsGetOptions() *metav1.GetOptions { + if o == nil || o.Raw == nil { + return &metav1.GetOptions{} + } + return o.Raw +} + +// ApplyOptions applies the given get options on these options, +// and then returns itself (for convenient chaining). +func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions { + for _, opt := range opts { + opt.ApplyToGet(o) + } + return o +} + +// }}} + // {{{ List Options // ListOptions contains options for limiting or filtering results. @@ -318,7 +414,7 @@ func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { // pre-parsed selectors (since generally, selectors will be executed // against the cache). type ListOptions struct { - // LabelSelector filters results by label. Use SetLabelSelector to + // LabelSelector filters results by label. Use labels.Parse() to // set from raw string form. LabelSelector labels.Selector // FieldSelector filters results by a particular field. In order @@ -341,6 +437,12 @@ type ListOptions struct { // it has expired. This field is not supported if watch is true in the Raw ListOptions. Continue string + // UnsafeDisableDeepCopy indicates not to deep copy objects during list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + // +optional + UnsafeDisableDeepCopy *bool + // Raw represents raw ListOptions, as passed to the API server. Note // that these may not be respected by all implementations of interface, // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. @@ -369,6 +471,9 @@ func (o *ListOptions) ApplyToList(lo *ListOptions) { if o.Continue != "" { lo.Continue = o.Continue } + if o.UnsafeDisableDeepCopy != nil { + lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy + } } // AsListOptions returns these options as a flattened metav1.ListOptions. @@ -408,8 +513,15 @@ type MatchingLabels map[string]string // ApplyToList applies this configuration to the given list options. func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? - sel := labels.SelectorFromValidatedSet(map[string]string(m)) - opts.LabelSelector = sel + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // If there's already a selector, we need to AND the two together. + noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) + reqs, _ := noValidSel.Requirements() + for _, req := range reqs { + opts.LabelSelector = opts.LabelSelector.Add(req) + } } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -423,14 +535,17 @@ type HasLabels []string // ApplyToList applies this configuration to the given list options. 
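// Illustrative sketch (not part of the patch): the new GetOption plumbing and
// the changed label-option semantics (MatchingLabels and HasLabels now AND
// their requirements into any selector already set, instead of replacing it).
// Assumes corev1/metav1 imports and an initialized client `c`; names and
// label keys are placeholders.
func readExamples(ctx context.Context, c client.Client) error {
	// Get accepts options now, e.g. a raw resourceVersion.
	cm := &corev1.ConfigMap{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "cfg"}, cm,
		&client.GetOptions{Raw: &metav1.GetOptions{ResourceVersion: "0"}}); err != nil {
		return err
	}

	// The effective selector is app=web AND canary-exists, in namespace "default".
	pods := &corev1.PodList{}
	return c.List(ctx, pods,
		client.InNamespace("default"),
		client.MatchingLabels{"app": "web"},
		client.HasLabels{"canary"},
	)
}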
func (m HasLabels) ApplyToList(opts *ListOptions) { - sel := labels.NewSelector() + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // TODO: ignore invalid labels will result in an empty selector. + // This is inconsistent to the that of MatchingLabels. for _, label := range m { r, err := labels.NewRequirement(label, selection.Exists, nil) if err == nil { - sel = sel.Add(*r) + opts.LabelSelector = opts.LabelSelector.Add(*r) } } - opts.LabelSelector = sel } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -501,6 +616,11 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + // Limit specifies the maximum number of results to return from the server. // Limit does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. @@ -511,6 +631,25 @@ func (l Limit) ApplyToList(opts *ListOptions) { opts.Limit = int64(l) } +// UnsafeDisableDeepCopyOption indicates not to deep copy objects during list objects. +// Be very careful with this, when enabled you must DeepCopy any object before mutating it, +// otherwise you will mutate the object in the cache. +type UnsafeDisableDeepCopyOption bool + +// ApplyToList applies this configuration to the given an List options. +func (d UnsafeDisableDeepCopyOption) ApplyToList(opts *ListOptions) { + definitelyTrue := true + definitelyFalse := false + if d { + opts.UnsafeDisableDeepCopy = &definitelyTrue + } else { + opts.UnsafeDisableDeepCopy = &definitelyFalse + } +} + +// UnsafeDisableDeepCopy indicates not to deep copy objects during list objects. +const UnsafeDisableDeepCopy = UnsafeDisableDeepCopyOption(true) + // Continue sets a continuation token to retrieve chunks of results when using limit. // Continue does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. @@ -664,6 +803,11 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go index 10984c534..11d608388 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -19,7 +19,7 @@ package client import ( "fmt" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch/v5" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go deleted file mode 100644 index bf4b861f3..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. -type NewDelegatingClientInput struct { - CacheReader Reader - Client Client - UncachedObjects []Object - CacheUnstructured bool -} - -// NewDelegatingClient creates a new delegating client. -// -// A delegating client forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. -func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { - uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range in.UncachedObjects { - gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) - if err != nil { - return nil, err - } - uncachedGVKs[gvk] = struct{}{} - } - - return &delegatingClient{ - scheme: in.Client.Scheme(), - mapper: in.Client.RESTMapper(), - Reader: &delegatingReader{ - CacheReader: in.CacheReader, - ClientReader: in.Client, - scheme: in.Client.Scheme(), - uncachedGVKs: uncachedGVKs, - cacheUnstructured: in.CacheUnstructured, - }, - Writer: in.Client, - StatusClient: in.Client, - }, nil -} - -type delegatingClient struct { - Reader - Writer - StatusClient - - scheme *runtime.Scheme - mapper meta.RESTMapper -} - -// Scheme returns the scheme this client is using. -func (d *delegatingClient) Scheme() *runtime.Scheme { - return d.scheme -} - -// RESTMapper returns the rest mapper this client is using. -func (d *delegatingClient) RESTMapper() meta.RESTMapper { - return d.mapper -} - -// delegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). -type delegatingReader struct { - CacheReader Reader - ClientReader Reader - - uncachedGVKs map[schema.GroupVersionKind]struct{} - scheme *runtime.Scheme - cacheUnstructured bool -} - -func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, d.scheme) - if err != nil { - return false, err - } - // TODO: this is producing unsafe guesses that don't actually work, - // but it matches ~99% of the cases out there. - if meta.IsListType(obj) { - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - } - if _, isUncached := d.uncachedGVKs[gvk]; isUncached { - return true, nil - } - if !d.cacheUnstructured { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - return isUnstructured || isUnstructuredList, nil - } - return false, nil -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. 
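// Illustrative sketch (not part of the patch): with split.go and
// NewDelegatingClient removed, the read-from-cache/write-to-server split is
// configured when constructing the client itself (see shouldBypassCache in
// client.go above). The option names below (client.CacheOptions and its
// Reader/DisableFor fields) are assumed from the controller-runtime release
// vendored here; `cfg`, `scheme` and `cacheReader` are placeholders, and a
// manager's GetClient() normally does this wiring for you.
func newCacheBackedClient(cfg *rest.Config, scheme *runtime.Scheme, cacheReader client.Reader) (client.Client, error) {
	return client.New(cfg, client.Options{
		Scheme: scheme,
		Cache: &client.CacheOptions{
			Reader:     cacheReader,
			DisableFor: []client.Object{&corev1.ConfigMap{}}, // always read these live
		},
	})
}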
-func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error { - if isUncached, err := d.shouldBypassCache(obj); err != nil { - return err - } else if isUncached { - return d.ClientReader.Get(ctx, key, obj) - } - return d.CacheReader.Get(ctx, key, obj) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { - if isUncached, err := d.shouldBypassCache(list); err != nil { - return err - } else if isUncached { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index dde7b21f2..92afd9a9c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -24,24 +24,22 @@ import ( var _ Reader = &typedClient{} var _ Writer = &typedClient{} -var _ StatusWriter = &typedClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type typedClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } createOpts := &CreateOptions{} createOpts.ApplyOptions(opts) + return o.Post(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). @@ -53,13 +51,14 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti // Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } updateOpts := &UpdateOptions{} updateOpts.ApplyOptions(opts) + return o.Put(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). @@ -72,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti // Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -91,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti // DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -110,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet // Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -121,36 +120,43 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . } patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + return o.Patch(patch.Type()). 
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). Body(data). Do(ctx). Into(obj) } // Get implements client.Client. -func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { - r, err := c.cache.getResource(obj) +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + r, err := c.resources.getResource(obj) if err != nil { return err } + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) return r.Get(). NamespaceIfScoped(key.Namespace, r.isNamespaced()). Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). Name(key.Name).Do(ctx).Into(obj) } // List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } + listOpts := ListOptions{} listOpts.ApplyOptions(opts) + return r.Get(). NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). Resource(r.resource()). @@ -159,9 +165,56 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti Into(obj) } -// UpdateStatus used by StatusWriter to write status. -func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) +func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + getOpts := &SubResourceGetOptions{} + getOpts.ApplyOptions(opts) + + return o.Get(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + createOpts := &SubResourceCreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(subResourceObj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +// UpdateSubResource used by SubResourceWriter to write status. +func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -169,37 +222,58 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...Upda // wrapped to improve the UX ? 
// It will be nice to receive an error saying the object doesn't implement // status subresource and check CRD definition + updateOpts := &SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + return o.Put(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - SubResource("status"). - Body(obj). - VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). Do(ctx). - Into(obj) + Into(body) } -// PatchStatus used by StatusWriter to write status. -func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) +// PatchSubResource used by SubResourceWriter to write subresource. +func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } - data, err := patch.Data(obj) + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) if err != nil { return err } - patchOpts := &PatchOptions{} return o.Patch(patch.Type()). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - SubResource("status"). + SubResource(subResource). Body(data). - VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). Do(ctx). - Into(obj) + Into(body) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index dcf15be27..0d9695178 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -21,37 +21,34 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) var _ Reader = &unstructuredClient{} var _ Writer = &unstructuredClient{} -var _ StatusWriter = &unstructuredClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type unstructuredClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } createOpts := &CreateOptions{} createOpts.ApplyOptions(opts) + result := o.Post(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). 
@@ -60,26 +57,27 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } updateOpts := UpdateOptions{} updateOpts.ApplyOptions(opts) + result := o.Put(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). @@ -89,24 +87,24 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - _, ok := obj.(*unstructured.Unstructured) - if !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } deleteOpts := DeleteOptions{} deleteOpts.ApplyOptions(opts) + return o.Delete(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). @@ -118,18 +116,18 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De // DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - _, ok := obj.(*unstructured.Unstructured) - if !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } deleteAllOfOpts := DeleteAllOfOptions{} deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). Resource(o.resource()). @@ -141,12 +139,11 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts // Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - _, ok := obj.(*unstructured.Unstructured) - if !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -157,26 +154,31 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch } patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + return o.Patch(patch.Type()). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). Body(data). Do(ctx). Into(obj) } // Get implements client.Client. 
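// Illustrative sketch (not part of the patch): the client now switches on the
// runtime.Unstructured interface instead of the concrete
// *unstructured.Unstructured type, so Unstructured, UnstructuredList and any
// other implementer are routed to the unstructured client shown here. Assumes
// the unstructured and schema packages are imported; names are placeholders.
func getUnstructuredDeployment(ctx context.Context, c client.Client) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	// The GVK set above is preserved across the call; Get restores it afterwards.
	err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "web"}, u)
	return u, err
}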
-func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { - u, ok := obj.(*unstructured.Unstructured) +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -184,35 +186,34 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object result := r.Get(). NamespaceIfScoped(key.Namespace, r.isNamespaced()). Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), uc.paramCodec). Name(key.Name). Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - u, ok := obj.(*unstructured.UnstructuredList) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() - if strings.HasSuffix(gvk.Kind, "List") { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + gvk := u.GetObjectKind().GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - listOpts := ListOptions{} - listOpts.ApplyOptions(opts) - - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). Resource(r.resource()). @@ -221,57 +222,140 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... Into(obj) } -func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { - _, ok := obj.(*unstructured.Unstructured) - if !ok { +func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } + getOpts := &SubResourceGetOptions{} + getOpts.ApplyOptions(opts) + + return o.Get(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + VersionedParams(getOpts.AsGetOptions(), uc.paramCodec). + Do(ctx). 
+ Into(subResourceObj) +} + +func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + o, err := uc.resources.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &SubResourceCreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(subResourceObj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.resources.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + return o.Put(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - SubResource("status"). - Body(obj). - VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). Do(ctx). - Into(obj) + Into(body) } -func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - u, ok := obj.(*unstructured.Unstructured) +func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - data, err := patch.Data(obj) + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) if err != nil { return err } - patchOpts := &PatchOptions{} result := o.Patch(patch.Type()). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - SubResource("status"). + SubResource(subResource). Body(data). - VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). Do(ctx). 
- Into(u) + Into(body) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go index 765ca5daa..181b22a67 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -21,9 +21,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -33,21 +32,16 @@ func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return &watchingClient{client: client, dynamic: dynamicClient}, nil + return &watchingClient{client: client}, nil } type watchingClient struct { *client - dynamic dynamic.Interface } func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { switch l := list.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return w.unstructuredWatch(ctx, l, opts...) case *metav1.PartialObjectMetadataList: return w.metadataWatch(ctx, l, opts...) @@ -69,9 +63,7 @@ func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { gvk := obj.GroupVersionKind() - if strings.HasSuffix(gvk.Kind, "List") { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") listOpts := w.listOpts(opts...) @@ -83,27 +75,23 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO return resInt.Watch(ctx, *listOpts.AsListOptions()) } -func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { - gvk := obj.GroupVersionKind() - if strings.HasSuffix(gvk.Kind, "List") { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } - - r, err := w.client.unstructuredClient.cache.getResource(obj) +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) if err != nil { return nil, err } listOpts := w.listOpts(opts...) - if listOpts.Namespace != "" && r.isNamespaced() { - return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) - } - return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). 
+ Watch(ctx) } func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { - r, err := w.client.typedClient.cache.getResource(obj) + r, err := w.client.typedClient.resources.getResource(obj) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go new file mode 100644 index 000000000..344abcd28 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -0,0 +1,394 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerutil + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// AlreadyOwnedError is an error returned if the object you are trying to assign +// a controller reference is already owned by another controller Object is the +// subject and Owner is the reference for the current owner. +type AlreadyOwnedError struct { + Object metav1.Object + Owner metav1.OwnerReference +} + +func (e *AlreadyOwnedError) Error() string { + return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) +} + +func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError { + return &AlreadyOwnedError{ + Object: obj, + Owner: owner, + } +} + +// SetControllerReference sets owner as a Controller OwnerReference on controlled. +// This is used for garbage collection of the controlled object and for +// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). +// Since only one OwnerReference can be a controller, it returns an error if +// there is another OwnerReference with Controller flag set. +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + if err := validateOwner(owner, controlled); err != nil { + return err + } + + // Create a new controller ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + } + + // Return early with an error if the object is already controlled. 
+ if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { + return newAlreadyOwnedError(controlled, *existing) + } + + // Update owner references and return. + upsertOwnerRef(ref, controlled) + return nil +} + +// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. +// This allows you to declare that owner has a dependency on the object without specifying it as a controller. +// If a reference to the same object already exists, it'll be overwritten with the newly provided version. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner) + } + if err := validateOwner(owner, object); err != nil { + return err + } + + // Create a new owner ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + + // Update owner references and return. + upsertOwnerRef(ref, object) + return nil +} + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. +func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} + +func validateOwner(owner, object metav1.Object) error { + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } + return nil +} + +// Returns true if a and b point to the same object. +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call. +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." + // OperationResultNone means that the resource has not been changed. + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created. + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated. + OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated. + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated. 
+ OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" +) + +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + existing := obj.DeepCopyObject() + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + + if equality.Semantic.DeepEqual(existing, obj) { + return OperationResultNone, nil + } + + if err := c.Update(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultUpdated, nil +} + +// CreateOrPatch creates or patches the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the before +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. + before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasBeforeStatus { + unstructured.RemoveNestedField(before, "status") + } + + // Mutate the original object. + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + + // Convert the resource to unstructured to compare against our before copy. 
+ after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasAfterStatus { + unstructured.RemoveNestedField(after, "status") + } + + result := OperationResultNone + + if !reflect.DeepEqual(before, after) { + // Only issue a Patch if the before and after resources (minus status) differ + if err := c.Patch(ctx, obj, objPatch); err != nil { + return result, err + } + result = OperationResultUpdated + } + + if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { + // Only issue a Status Patch if the resource has a status and the beforeStatus + // and afterStatus copies differ + if result == OperationResultUpdated { + // If Status was replaced by Patch before, set it to afterStatus + objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return result, err + } + if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil { + return result, err + } + // If Status was replaced by Patch before, restore patched structure to the obj + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil { + return result, err + } + } + if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { + return result, err + } + if result == OperationResultUpdated { + result = OperationResultUpdatedStatus + } else { + result = OperationResultUpdatedStatusOnly + } + } + + return result, nil +} + +// mutate wraps a MutateFn and applies validation to its result. +func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error { + if err := f(); err != nil { + return err + } + if newKey := client.ObjectKeyFromObject(obj); key != newKey { + return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") + } + return nil +} + +// MutateFn is a function which mutates the existing object into its desired state. +type MutateFn func() error + +// AddFinalizer accepts an Object and adds the provided finalizer if not present. +// It returns an indication of whether it updated the object's list of finalizers. +func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return false + } + } + o.SetFinalizers(append(f, finalizer)) + return true +} + +// RemoveFinalizer accepts an Object and removes the provided finalizer if present. +// It returns an indication of whether it updated the object's list of finalizers. +func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for i := 0; i < len(f); i++ { + if f[i] == finalizer { + f = append(f[:i], f[i+1:]...) + i-- + finalizersUpdated = true + } + } + o.SetFinalizers(f) + return +} + +// ContainsFinalizer checks an Object that the provided finalizer is present. +func ContainsFinalizer(o client.Object, finalizer string) bool { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// Object allows functions to work indistinctly with any resource that +// implements both Object interfaces. 
+// +// Deprecated: Use client.Object instead. +type Object = client.Object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go new file mode 100644 index 000000000..ab386b29c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controllerutil contains utility functions for working with and implementing Controllers. +*/ +package controllerutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go deleted file mode 100644 index 7057f3dbe..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package objectutil - -import ( - "errors" - "fmt" - - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel. -func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} - -// IsAPINamespaced returns true if the object is namespace scoped. -// For unstructured objects the gvk is found from the object itself. -func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return false, err - } - - return IsAPINamespacedWithGVK(gvk, scheme, restmapper) -} - -// IsAPINamespacedWithGVK returns true if the object having the provided -// GVK is namespace scoped. 
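The controllerutil helpers added above (CreateOrUpdate, SetControllerReference, the finalizer utilities) are normally driven from a reconciler. A minimal sketch under that assumption; the client, scheme, owner object, and ConfigMap name are illustrative rather than taken from the patch.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// reconcileConfigMap creates or updates a ConfigMap owned by owner.
// CreateOrUpdate fetches the current object, applies the MutateFn, and only
// issues an Update when the mutated object differs from what was read.
func reconcileConfigMap(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner client.Object) (controllerutil.OperationResult, error) {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config", Namespace: owner.GetNamespace()},
	}
	return controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["owner"] = owner.GetName()
		// Make owner the managing controller so the ConfigMap is garbage
		// collected with it; this fails if another controller already owns it.
		return controllerutil.SetControllerReference(owner, cm, scheme)
	})
}

// ensureFinalizer adds a finalizer only when it is missing and reports
// whether the object needs to be persisted afterwards.
func ensureFinalizer(obj client.Object, name string) bool {
	if controllerutil.ContainsFinalizer(obj, name) {
		return false
	}
	return controllerutil.AddFinalizer(obj, name)
}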
-func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) - if err != nil { - return false, fmt.Errorf("failed to get restmapping: %w", err) - } - - scope := restmapping.Scope.Name() - - if scope == "" { - return false, errors.New("scope cannot be identified, empty scope returned") - } - - if scope != apimeta.RESTScopeNameRoot { - return true, nil - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c15e73cff..c27b4305f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. type loggerPromise struct { - logger *DelegatingLogSink + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,7 +33,7 @@ type loggerPromise struct { tags []interface{} } -func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromi } // WithValues provides a new Logger with the tags appended. -func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { +func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -73,6 +73,9 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { p.logger.lock.Lock() p.logger.logger = sink + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + p.logger.logger = withCallDepth.WithCallDepth(1) + } p.logger.promise = nil p.logger.lock.Unlock() @@ -81,12 +84,12 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { } } -// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogSink struct { +type delegatingLogSink struct { lock sync.RWMutex logger logr.LogSink promise *loggerPromise @@ -94,7 +97,8 @@ type DelegatingLogSink struct { } // Init implements logr.LogSink. -func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() l.lock.Lock() defer l.lock.Unlock() l.info = info @@ -103,7 +107,8 @@ func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info // logs. -func (l *DelegatingLogSink) Enabled(level int) bool { +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() return l.logger.Enabled(level) @@ -115,7 +120,8 @@ func (l *DelegatingLogSink) Enabled(level int) bool { // the log line. 
The key/value pairs can then be used to add additional // variable information. The key/value pairs should alternate string // keys and arbitrary values. -func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Info(level, msg, keysAndValues...) @@ -129,22 +135,28 @@ func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interfa // The msg field should be used to add context to any underlying error, // while the err field should be used to attach the actual error that // triggered this log line, if present. -func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Error(err, msg, keysAndValues...) } // WithName provides a new Logger with the name appended. -func (l *DelegatingLogSink) WithName(name string) logr.LogSink { +func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() if l.promise == nil { - return l.logger.WithName(name) + sink := l.logger.WithName(name) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise @@ -152,15 +164,20 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { } // WithValues provides a new Logger with the tags appended. -func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() if l.promise == nil { - return l.logger.WithValues(tags...) + sink := l.logger.WithValues(tags...) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -170,16 +187,16 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. -func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogSink constructs a new DelegatingLogSink which uses -// the given logger before it's promise is fulfilled. -func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { - l := &DelegatingLogSink{ +// newDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. 
+func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 76950cc34..a79151c69 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -17,7 +17,7 @@ limitations under the License. // Package log contains utilities for fetching a new logger // when one is not already available. // -// The Log Handle +// # The Log Handle // // This package contains a root logr.Logger Log. It may be used to // get a handle to whatever the root logging implementation is. By @@ -25,17 +25,20 @@ limitations under the License. // to loggers. When the implementation is set using SetLogger, these // "promises" will be converted over to real loggers. // -// Logr +// # Logr // // All logging in controller-runtime is structured, using a set of interfaces // defined by a package called logr -// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides // helpers for setting up logr backed by Zap (go.uber.org/zap). package log import ( "context" - "sync" + "fmt" + "os" + "runtime/debug" + "sync/atomic" "time" "github.com/go-logr/logr" @@ -43,35 +46,24 @@ import ( // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - - loggerWasSet = true - dlog.Fulfill(l.GetSink()) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) } -// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries -// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory -// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. -// -// We need to keep the DelegatingLogSink because we have various inits() that get a logger from -// here. They will always get executed before any code that imports controller-runtime -// has a chance to run and hence to set an actual logger. -func init() { - // Init is blocking, so start a new goroutine - go func() { - time.Sleep(30 * time.Second) - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - if !loggerWasSet { - dlog.Fulfill(NullLogSink{}) +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + SetLogger(logr.New(NullLogSink{})) } - }() + } } var ( - loggerWasSetLock sync.Mutex - loggerWasSet bool + logFullfilled atomic.Bool ) // Log is the base logger used by kubebuilder. It delegates @@ -80,8 +72,10 @@ var ( // the first 30 seconds of a binaries lifetime, it will get // set to a NullLogSink. var ( - dlog = NewDelegatingLogSink(NullLogSink{}) - Log = logr.New(dlog) + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) ) // FromContext returns a logger with predefined values from a context.Context. 
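The rewritten fulfillment logic above changes the failure mode: if SetLogger is never called within roughly 30 seconds, the root delegating sink is fulfilled with NullLogSink, a one-time warning with a stack trace goes to stderr, and later log lines are discarded instead of buffered. A minimal sketch of installing a concrete sink early, using the klog-backed logger that the e2e-framework files later in this patch also wire up:

package main

import (
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	// Install a concrete logr sink before the 30-second window elapses;
	// otherwise the delegating sink falls back to NullLogSink and every
	// controller-runtime log line is dropped.
	log.SetLogger(klog.NewKlogr())

	logger := log.Log.WithName("setup")
	logger.Info("logger installed", "backend", "klog")
}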
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go index 3012fdd41..e9522632d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -47,7 +47,7 @@ type KubeAPIWarningLogger struct { } // HandleWarningHeader handles logging for responses from API server that are -// warnings with code being 299 and uses a logr.Logger for it's logging purposes. +// warnings with code being 299 and uses a logr.Logger for its logging purposes. func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) { if code != 299 || len(message) == 0 { return diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/client.go b/vendor/sigs.k8s.io/e2e-framework/klient/client.go index 680e7ad05..67314adec 100644 --- a/vendor/sigs.k8s.io/e2e-framework/klient/client.go +++ b/vendor/sigs.k8s.io/e2e-framework/klient/client.go @@ -17,7 +17,11 @@ limitations under the License. package klient import ( + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + "k8s.io/klog/v2" + cr "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/e2e-framework/klient/conf" "sigs.k8s.io/e2e-framework/klient/k8s/resources" ) @@ -38,6 +42,12 @@ type client struct { resources *resources.Resources } +// NewControllerRuntimeClient provides an instance of the Controller runtime client with +// the provided rest config and custom runtime scheme. +func NewControllerRuntimeClient(cfg *rest.Config, scheme *runtime.Scheme) (cr.Client, error) { + return cr.New(cfg, cr.Options{Scheme: scheme}) +} + // New returns a new Client value func New(cfg *rest.Config) (Client, error) { res, err := resources.New(cfg) @@ -74,3 +84,7 @@ func (c *client) Resources(namespace ...string) *resources.Resources { panic("too many namespaces provided") } } + +func init() { + log.SetLogger(klog.NewKlogr()) +} diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/conf/config.go b/vendor/sigs.k8s.io/e2e-framework/klient/conf/config.go index 74a739db9..5e57f80fb 100644 --- a/vendor/sigs.k8s.io/e2e-framework/klient/conf/config.go +++ b/vendor/sigs.k8s.io/e2e-framework/klient/conf/config.go @@ -18,6 +18,7 @@ limitations under the License. package conf import ( + "errors" "flag" "os" "os/user" @@ -33,14 +34,32 @@ var DefaultClusterContext = "" // New returns Kubernetes configuration value of type *rest.Config. 
// filename is kubeconfig file func New(fileName string) (*rest.Config, error) { - // if filename is not provided assume in-cluster-config + var resolvedKubeConfigFile string + kubeContext := ResolveClusterContext() + + // resolve the kubeconfig file + resolvedKubeConfigFile = fileName if fileName == "" { - return rest.InClusterConfig() + resolvedKubeConfigFile = ResolveKubeConfigFile() + } + + // if resolvedKubeConfigFile is still empty, assume in-cluster config + if resolvedKubeConfigFile == "" { + if kubeContext == "" { + return rest.InClusterConfig() + } + // if in-cluster can't use the --kubeContext flag + return nil, errors.New("cannot use a cluster context without a valid kubeconfig file") + } + + // set the desired context if provided + if kubeContext != "" { + return NewWithContextName(resolvedKubeConfigFile, kubeContext) } - // create the config object from k8s config path + // create the config object from resolvedKubeConfigFile without a context return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: fileName}, &clientcmd.ConfigOverrides{}).ClientConfig() + &clientcmd.ClientConfigLoadingRules{ExplicitPath: resolvedKubeConfigFile}, &clientcmd.ConfigOverrides{}).ClientConfig() } // NewWithContextName returns k8s config value of type *rest.Config @@ -110,9 +129,9 @@ func ResolveKubeConfigFile() string { // ResolveClusterContext returns cluster context name based on --context flag. func ResolveClusterContext() string { - // If a flag --kube-context is specified use that + // If a flag --context is specified use that if flag.Parsed() { - f := flag.Lookup("kube-context") + f := flag.Lookup("context") if f != nil { return f.Value.String() } diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/decoder/decoder.go b/vendor/sigs.k8s.io/e2e-framework/klient/decoder/decoder.go new file mode 100644 index 000000000..db58973c3 --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/klient/decoder/decoder.go @@ -0,0 +1,355 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package decoder + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "io/fs" + "os" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" +) + +// Options are a set of configurations used to instruct the decoding process and otherwise +// alter the output of decoding operations. 
+type Options struct { + DefaultGVK *schema.GroupVersionKind + MutateFuncs []MutateFunc +} + +// DecodeOption is a function that alters the configuration Options used to decode and optionally mutate objects via MutateFuncs +type DecodeOption func(*Options) + +// MutateFunc is a function executed after an object is decoded to alter its state in a pre-defined way, and can be used to apply defaults. +// Returning an error halts decoding of any further objects. +type MutateFunc func(obj k8s.Object) error + +// HandlerFunc is a function executed after an object has been decoded and patched. If an error is returned, further decoding is halted. +type HandlerFunc func(ctx context.Context, obj k8s.Object) error + +// DecodeEachFile resolves files at the filesystem matching the pattern, decoding JSON or YAML files. Supports multi-document files. +// +// If handlerFn returns an error, decoding is halted. +// Options may be provided to configure the behavior of the decoder. +func DecodeEachFile(ctx context.Context, fsys fs.FS, pattern string, handlerFn HandlerFunc, options ...DecodeOption) error { + files, err := fs.Glob(fsys, pattern) + if err != nil { + return err + } + for _, file := range files { + f, err := fsys.Open(file) + if err != nil { + return err + } + defer f.Close() + if err := DecodeEach(ctx, f, handlerFn, options...); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + } + return nil +} + +// DecodeAllFiles resolves files at the filesystem matching the pattern, decoding JSON or YAML files. Supports multi-document files. +// Falls back to the unstructured.Unstructured type if a matching type cannot be found for the Kind. +// Options may be provided to configure the behavior of the decoder. +func DecodeAllFiles(ctx context.Context, fsys fs.FS, pattern string, options ...DecodeOption) ([]k8s.Object, error) { + objects := []k8s.Object{} + err := DecodeEachFile(ctx, fsys, pattern, func(ctx context.Context, obj k8s.Object) error { + objects = append(objects, obj) + return nil + }, options...) + return objects, err +} + +// ApplyWithManifestDir resolves all the files in the Directory dirPath against the globbing pattern and creates a kubernetes +// resource for each of the resources found under the manifest directory. +func ApplyWithManifestDir(ctx context.Context, r *resources.Resources, dirPath, pattern string, createOptions []resources.CreateOption, options ...DecodeOption) error { + err := DecodeEachFile(ctx, os.DirFS(dirPath), pattern, CreateHandler(r, createOptions...), options...) + return err +} + +// DeleteWithManifestDir does the reverse of ApplyUsingManifestDir does. This will resolve all files in the dirPath against the pattern and then +// delete those kubernetes resources found under the manifest directory. +func DeleteWithManifestDir(ctx context.Context, r *resources.Resources, dirPath, pattern string, deleteOptions []resources.DeleteOption, options ...DecodeOption) error { + err := DecodeEachFile(ctx, os.DirFS(dirPath), pattern, DeleteHandler(r, deleteOptions...), options...) + return err +} + +// Decode a stream of documents of any Kind using either the innate typing of the scheme. +// Falls back to the unstructured.Unstructured type if a matching type cannot be found for the Kind. +// +// If handlerFn returns an error, decoding is halted. +// Options may be provided to configure the behavior of the decoder. 
+func DecodeEach(ctx context.Context, manifest io.Reader, handlerFn HandlerFunc, options ...DecodeOption) error { + decoder := yaml.NewYAMLReader(bufio.NewReader(manifest)) + for { + b, err := decoder.Read() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } + obj, err := DecodeAny(bytes.NewReader(b), options...) + if err != nil { + return err + } + if err := handlerFn(ctx, obj); err != nil { + return err + } + } + return nil +} + +// DecodeAll is a stream of documents of any Kind using either the innate typing of the scheme. +// Falls back to the unstructured.Unstructured type if a matching type cannot be found for the Kind. +// Options may be provided to configure the behavior of the decoder. +func DecodeAll(ctx context.Context, manifest io.Reader, options ...DecodeOption) ([]k8s.Object, error) { + objects := []k8s.Object{} + err := DecodeEach(ctx, manifest, func(ctx context.Context, obj k8s.Object) error { + objects = append(objects, obj) + return nil + }, options...) + return objects, err +} + +// DecodeAny decodes any single-document YAML or JSON input using either the innate typing of the scheme. +// Falls back to the unstructured.Unstructured type if a matching type cannot be found for the Kind. +// Options may be provided to configure the behavior of the decoder. +func DecodeAny(manifest io.Reader, options ...DecodeOption) (k8s.Object, error) { + decodeOpt := &Options{} + for _, opt := range options { + opt(decodeOpt) + } + + k8sDecoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDeserializer().Decode + b, err := io.ReadAll(manifest) + if err != nil { + return nil, err + } + runtimeObj, _, err := k8sDecoder(b, decodeOpt.DefaultGVK, nil) + if runtime.IsNotRegisteredError(err) { + // fallback to the unstructured.Unstructured type if a type is not registered for the Object to be decoded + runtimeObj = &unstructured.Unstructured{} + if err := yaml.Unmarshal(b, runtimeObj); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + obj, ok := runtimeObj.(k8s.Object) + if !ok { + return nil, err + } + for _, patch := range decodeOpt.MutateFuncs { + if err := patch(obj); err != nil { + return nil, err + } + } + return obj, nil +} + +// Decode a single-document YAML or JSON file into the provided object. Patches are applied +// after decoding to the object to update the loaded resource. +func Decode(manifest io.Reader, obj k8s.Object, options ...DecodeOption) error { + decodeOpt := &Options{} + for _, opt := range options { + opt(decodeOpt) + } + if err := yaml.NewYAMLOrJSONDecoder(manifest, 1024).Decode(obj); err != nil { + return err + } + for _, patch := range decodeOpt.MutateFuncs { + if err := patch(obj); err != nil { + return err + } + } + return nil +} + +// DecodeFile decodes a single-document YAML or JSON file into the provided object. Patches are applied +// after decoding to the object to update the loaded resource. +func DecodeFile(fsys fs.FS, manifestPath string, obj k8s.Object, options ...DecodeOption) error { + f, err := fsys.Open(manifestPath) + if err != nil { + return err + } + defer f.Close() + return Decode(f, obj, options...) +} + +// DecodeString decodes a single-document YAML or JSON string into the provided object. Patches are applied +// after decoding to the object to update the loaded resource. +func DecodeString(rawManifest string, obj k8s.Object, options ...DecodeOption) error { + return Decode(strings.NewReader(rawManifest), obj, options...) 
+} + +// DefaultGVK instructs the decoder to use the given type to look up the appropriate Go type to decode into +// instead of its default behavior of deciding this by decoding the Group, Version, and Kind fields. +func DefaultGVK(defaults *schema.GroupVersionKind) DecodeOption { + return func(do *Options) { + do.DefaultGVK = defaults + } +} + +// MutateOption can be used to add a custom MutateFunc to the DecodeOption +// used to configure the decoding of objects +func MutateOption(m MutateFunc) DecodeOption { + return func(do *Options) { + do.MutateFuncs = append(do.MutateFuncs, m) + } +} + +// MutateLabels is an optional parameter to decoding functions that will patch an objects metadata.labels +func MutateLabels(overrides map[string]string) DecodeOption { + return MutateOption(func(obj k8s.Object) error { + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + obj.SetLabels(labels) + } + for key, value := range overrides { + labels[key] = value + } + return nil + }) +} + +// MutateAnnotations is an optional parameter to decoding functions that will patch an objects metadata.annotations +func MutateAnnotations(overrides map[string]string) DecodeOption { + return MutateOption(func(obj k8s.Object) error { + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + obj.SetLabels(annotations) + } + for key, value := range overrides { + annotations[key] = value + } + return nil + }) +} + +// MutateOwnerAnnotations is an optional parameter to decoding functions that will patch objects using the given owner object +func MutateOwnerAnnotations(owner k8s.Object) DecodeOption { + return MutateOption(func(obj k8s.Object) error { + return controllerutil.SetOwnerReference(owner, obj, scheme.Scheme) + }) +} + +// MutateNamespace is an optional parameter to decoding functions that will patch objects with the given namespace name +func MutateNamespace(namespace string) DecodeOption { + return MutateOption(func(obj k8s.Object) error { + obj.SetNamespace(namespace) + return nil + }) +} + +// CreateHandler returns a HandlerFunc that will create objects +func CreateHandler(r *resources.Resources, opts ...resources.CreateOption) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + return r.Create(ctx, obj, opts...) + } +} + +// ReadHandler returns a HandlerFunc that will use the provided object's Kind / Namespace / Name to retrieve +// the current state of the object using the provided Resource client. +// This helper makes it easy to use a stale reference to an object to retrieve its current version. 
+func ReadHandler(r *resources.Resources, handler HandlerFunc) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + name := obj.GetName() + namespace := obj.GetNamespace() + // use scheme.Scheme to generate a new, empty object to use as a base for decoding into + gvk := obj.GetObjectKind().GroupVersionKind() + o, err := scheme.Scheme.New(gvk) + if err != nil { + return fmt.Errorf("resources: GroupVersionKind not found in scheme: %s", gvk.String()) + } + obj, ok := o.(k8s.Object) + if !ok { + return fmt.Errorf("resources: unexpected type %T does not satisfy k8s.Object", obj) + } + if err := r.Get(ctx, name, namespace, obj); err != nil { + return err + } + return handler(ctx, obj) + } +} + +// UpdateHandler returns a HandlerFunc that will update objects +func UpdateHandler(r *resources.Resources, opts ...resources.UpdateOption) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + return r.Update(ctx, obj, opts...) + } +} + +// DeleteHandler returns a HandlerFunc that will delete objects +func DeleteHandler(r *resources.Resources, opts ...resources.DeleteOption) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + return r.Delete(ctx, obj, opts...) + } +} + +// IgnoreErrorHandler returns a HandlerFunc that will ignore the provided error if the errorMatcher returns true +func IgnoreErrorHandler(handler HandlerFunc, errorMatcher func(err error) bool) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + if err := handler(ctx, obj); err != nil && !errorMatcher(err) { + return err + } + return nil + } +} + +// NoopHandler returns a Handler func that only returns nil +func NoopHandler(_ *resources.Resources, _ ...resources.DeleteOption) HandlerFunc { + return func(ctx context.Context, obj k8s.Object) error { + return nil + } +} + +// CreateIgnoreAlreadyExists returns a HandlerFunc that will create objects if they do not already exist +func CreateIgnoreAlreadyExists(r *resources.Resources, opts ...resources.CreateOption) HandlerFunc { + return IgnoreErrorHandler(CreateHandler(r, opts...), apierrors.IsAlreadyExists) +} + +// DeleteIgnoreNotFound returns a HandlerFunc that will delete objects if they do not already exist +func DeleteIgnoreNotFound(r *resources.Resources, opts ...resources.DeleteOption) HandlerFunc { + return IgnoreErrorHandler(DeleteHandler(r, opts...), apierrors.IsNotFound) +} + +func init() { + log.SetLogger(klog.NewKlogr()) +} diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/k8s/resources/resources.go b/vendor/sigs.k8s.io/e2e-framework/klient/k8s/resources/resources.go index 3942b0730..49cd29cef 100644 --- a/vendor/sigs.k8s.io/e2e-framework/klient/k8s/resources/resources.go +++ b/vendor/sigs.k8s.io/e2e-framework/klient/k8s/resources/resources.go @@ -17,17 +17,26 @@ limitations under the License. 
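As an illustration of the new decoder package rather than part of the patch: decoding every manifest matching a glob, forcing a namespace onto each object, and creating the objects while tolerating reruns. The testdata directory and namespace parameter are hypothetical.

package example

import (
	"context"
	"os"

	"sigs.k8s.io/e2e-framework/klient/decoder"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
)

// applyTestManifests decodes every YAML manifest matching the pattern and
// creates it, ignoring AlreadyExists errors so the helper can run repeatedly.
func applyTestManifests(ctx context.Context, res *resources.Resources, namespace string) error {
	return decoder.DecodeEachFile(
		ctx,
		os.DirFS("testdata/manifests"), // hypothetical manifest directory
		"*.yaml",
		decoder.CreateIgnoreAlreadyExists(res),
		decoder.MutateNamespace(namespace),
	)
}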
package resources import ( + "bytes" "context" "errors" "time" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/klog/v2" cr "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/watcher" ) type Resources struct { @@ -67,6 +76,11 @@ func New(cfg *rest.Config) (*Resources, error) { return res, nil } +// GetConfig hepls to get config type *rest.Config +func (r *Resources) GetConfig() *rest.Config { + return r.config +} + func (r *Resources) WithNamespace(ns string) *Resources { r.namespace = ns return r @@ -84,7 +98,11 @@ func (r *Resources) Create(ctx context.Context, obj k8s.Object, opts ...CreateOp fn(createOptions) } - o := &cr.CreateOptions{Raw: createOptions} + o := &cr.CreateOptions{ + Raw: createOptions, + DryRun: createOptions.DryRun, + FieldManager: createOptions.FieldManager, + } return r.client.Create(ctx, obj, o) } @@ -97,10 +115,31 @@ func (r *Resources) Update(ctx context.Context, obj k8s.Object, opts ...UpdateOp fn(updateOptions) } - o := &cr.UpdateOptions{Raw: updateOptions} + o := &cr.UpdateOptions{ + Raw: updateOptions, + DryRun: updateOptions.DryRun, + FieldManager: updateOptions.FieldManager, + } return r.client.Update(ctx, obj, o) } +// UpdateSubresource updates the subresource of the object +func (r *Resources) UpdateSubresource(ctx context.Context, obj k8s.Object, subresource string, opts ...UpdateOption) error { + updateOptions := &metav1.UpdateOptions{} + for _, fn := range opts { + fn(updateOptions) + } + + uo := cr.UpdateOptions{Raw: updateOptions} + o := &cr.SubResourceUpdateOptions{UpdateOptions: uo} + return r.client.SubResource(subresource).Update(ctx, obj, o) +} + +// UpdateStatus updates the status of the object +func (r *Resources) UpdateStatus(ctx context.Context, obj k8s.Object, opts ...UpdateOption) error { + return r.UpdateSubresource(ctx, obj, "status", opts...) 
+} + type DeleteOption func(*metav1.DeleteOptions) func (r *Resources) Delete(ctx context.Context, obj k8s.Object, opts ...DeleteOption) error { @@ -109,7 +148,13 @@ func (r *Resources) Delete(ctx context.Context, obj k8s.Object, opts ...DeleteOp fn(deleteOptions) } - o := &cr.DeleteOptions{Raw: deleteOptions} + o := &cr.DeleteOptions{ + Raw: deleteOptions, + GracePeriodSeconds: deleteOptions.GracePeriodSeconds, + Preconditions: deleteOptions.Preconditions, + PropagationPolicy: deleteOptions.PropagationPolicy, + DryRun: deleteOptions.DryRun, + } return r.client.Delete(ctx, obj, o) } @@ -132,7 +177,22 @@ func (r *Resources) List(ctx context.Context, objs k8s.ObjectList, opts ...ListO fn(listOptions) } - o := &cr.ListOptions{Raw: listOptions} + ls, err := labels.Parse(listOptions.LabelSelector) + if err != nil { + return err + } + fs, err := fields.ParseSelector(listOptions.FieldSelector) + if err != nil { + return err + } + + o := &cr.ListOptions{ + Raw: listOptions, + FieldSelector: fs, + LabelSelector: ls, + Continue: listOptions.Continue, + Limit: listOptions.Limit, + } if r.namespace != "" { o.Namespace = r.namespace } @@ -156,8 +216,8 @@ func WithTimeout(to time.Duration) ListOption { // PatchOption is used to provide additional arguments to the Patch call. type PatchOption func(*metav1.PatchOptions) -// Patch patches portion of object `orig` with data from object `patch` -func (r *Resources) Patch(ctx context.Context, objs k8s.Object, patch k8s.Patch, opts ...PatchOption) error { +// Patch patches portion of object `obj` with data from object `patch` +func (r *Resources) Patch(ctx context.Context, obj k8s.Object, patch k8s.Patch, opts ...PatchOption) error { patchOptions := &metav1.PatchOptions{} for _, fn := range opts { @@ -166,8 +226,33 @@ func (r *Resources) Patch(ctx context.Context, objs k8s.Object, patch k8s.Patch, p := cr.RawPatch(patch.PatchType, patch.Data) - o := &cr.PatchOptions{Raw: patchOptions} - return r.client.Patch(ctx, objs, p, o) + o := &cr.PatchOptions{ + Raw: patchOptions, + DryRun: patchOptions.DryRun, + Force: patchOptions.Force, + FieldManager: patchOptions.FieldManager, + } + return r.client.Patch(ctx, obj, p, o) +} + +// PatchSubresource patches portion of object `obj` with data from object `patch` +func (r *Resources) PatchSubresource(ctx context.Context, obj k8s.Object, subresource string, patch k8s.Patch, opts ...PatchOption) error { + patchOptions := &metav1.PatchOptions{} + + for _, fn := range opts { + fn(patchOptions) + } + + p := cr.RawPatch(patch.PatchType, patch.Data) + + po := cr.PatchOptions{Raw: patchOptions} + o := &cr.SubResourcePatchOptions{PatchOptions: po} + return r.client.SubResource(subresource).Patch(ctx, obj, p, o) +} + +// PatchStatus patches portion of object `obj` with data from object `patch` +func (r *Resources) PatchStatus(ctx context.Context, objs k8s.Object, patch k8s.Patch, opts ...PatchOption) error { + return r.PatchSubresource(ctx, objs, "status", patch, opts...) 
} // Annotate attach annotations to an existing resource objec @@ -179,3 +264,71 @@ func (r *Resources) Annotate(obj k8s.Object, annotation map[string]string) { func (r *Resources) Label(obj k8s.Object, label map[string]string) { obj.SetLabels(label) } + +func (r *Resources) GetScheme() *runtime.Scheme { + return r.scheme +} + +// GetClient return the controller-runtime client instance +func (r *Resources) GetControllerRuntimeClient() cr.Client { + return r.client +} + +func (r *Resources) Watch(object k8s.ObjectList, opts ...ListOption) *watcher.EventHandlerFuncs { + listOptions := &metav1.ListOptions{} + + for _, fn := range opts { + fn(listOptions) + } + + o := &cr.ListOptions{Raw: listOptions} + + return &watcher.EventHandlerFuncs{ + ListOptions: o, + K8sObject: object, + Cfg: r.GetConfig(), + } +} + +func (r *Resources) ExecInPod(ctx context.Context, namespaceName, podName, containerName string, command []string, stdout, stderr *bytes.Buffer) error { + clientset, err := kubernetes.NewForConfig(r.config) + if err != nil { + return err + } + + req := clientset.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(podName). + Namespace(namespaceName). + SubResource("exec") + newScheme := runtime.NewScheme() + if err := v1.AddToScheme(newScheme); err != nil { + return err + } + parameterCodec := runtime.NewParameterCodec(newScheme) + req.VersionedParams(&v1.PodExecOptions{ + Container: containerName, + Command: command, + Stdout: true, + Stderr: true, + }, parameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(r.config, "POST", req.URL()) + if err != nil { + panic(err) + } + + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdout: stdout, + Stderr: stderr, + }) + if err != nil { + return err + } + + return nil +} + +func init() { + log.SetLogger(klog.NewKlogr()) +} diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/k8s/watcher/watch.go b/vendor/sigs.k8s.io/e2e-framework/klient/k8s/watcher/watch.go new file mode 100644 index 000000000..596fb1402 --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/klient/k8s/watcher/watch.go @@ -0,0 +1,132 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + "context" + + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + cr "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/e2e-framework/klient/k8s" +) + +// EventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of functions to invoke while getting notification from watcher +type EventHandlerFuncs struct { + addFunc func(obj interface{}) + updateFunc func(newObj interface{}) + deleteFunc func(obj interface{}) + watcher watch.Interface + ListOptions *cr.ListOptions + K8sObject k8s.ObjectList + Cfg *rest.Config +} + +// EventHandler can handle notifications for events that happen to a resource. 
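The new Resources.ExecInPod helper introduced above wires a SPDY executor for you; callers only supply buffers for the process output. A hedged usage sketch follows, with the namespace, pod, and container names as placeholders.

package example

import (
	"bytes"
	"context"
	"fmt"

	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
)

// runInPod executes a command in a named container and returns what the
// process wrote to stdout; stderr is surfaced alongside any error.
func runInPod(ctx context.Context, r *resources.Resources) (string, error) {
	var stdout, stderr bytes.Buffer
	err := r.ExecInPod(ctx, "default", "my-pod", "main",
		[]string{"cat", "/etc/hostname"}, &stdout, &stderr)
	if err != nil {
		return "", fmt.Errorf("exec failed: %v, stderr: %s", err, stderr.String())
	}
	return stdout.String(), nil
}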
+// Start will be waiting for the events notification which is responsible +// for invoking the registered user defined functions. +// Stop used to stop the watcher. +type EventHandler interface { + Start(ctx context.Context) + Stop() +} + +// Start triggers the registered methods based on the event received for +// particular k8s resources +func (e *EventHandlerFuncs) Start(ctx context.Context) error { + // check if context is valid and that it has not been cancelled. + if ctx.Err() != nil { + return ctx.Err() + } + + cl, err := cr.NewWithWatch(e.Cfg, cr.Options{}) + if err != nil { + return err + } + + w, err := cl.Watch(ctx, e.K8sObject, e.ListOptions) + if err != nil { + return err + } + + // set watcher object + e.watcher = w + + go func() { + for { + select { + case <-ctx.Done(): + if ctx.Err() != nil { + return + } + case event := <-e.watcher.ResultChan(): + // retrieve the event type + eventType := event.Type + + switch eventType { + case watch.Added: + // calls AddFunc if it's not nil. + if e.addFunc != nil { + e.addFunc(event.Object) + } + case watch.Modified: + // calls UpdateFunc if it's not nil. + if e.updateFunc != nil { + e.updateFunc(event.Object) + } + case watch.Deleted: + // calls DeleteFunc if it's not nil. + if e.deleteFunc != nil { + e.deleteFunc(event.Object) + } + } + } + } + }() + + return nil +} + +// Stop triggers stopping a particular k8s watch resources +func (e *EventHandlerFuncs) Stop() { + e.watcher.Stop() +} + +// SetAddFunc used to set action on create event +func (e *EventHandlerFuncs) WithAddFunc(addfn func(obj interface{})) *EventHandlerFuncs { + e.addFunc = addfn + return e +} + +// SetUpdateFunc sets action for any update events +func (e *EventHandlerFuncs) WithUpdateFunc(updatefn func(updated interface{})) *EventHandlerFuncs { + e.updateFunc = updatefn + return e +} + +// SetDeleteFunc sets action for delete events +func (e *EventHandlerFuncs) WithDeleteFunc(deletefn func(obj interface{})) *EventHandlerFuncs { + e.deleteFunc = deletefn + return e +} + +func init() { + log.SetLogger(klog.NewKlogr()) +} diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/wait/conditions/conditions.go b/vendor/sigs.k8s.io/e2e-framework/klient/wait/conditions/conditions.go index bcab5d538..6e81962e7 100644 --- a/vendor/sigs.k8s.io/e2e-framework/klient/wait/conditions/conditions.go +++ b/vendor/sigs.k8s.io/e2e-framework/klient/wait/conditions/conditions.go @@ -27,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachinerywait "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/e2e-framework/klient/k8s" @@ -50,10 +51,10 @@ func (c *Condition) namespacedName(obj k8s.Object) string { // ResourceScaled is a helper function used to check if the resource under question has a pre-defined number of // replicas. This can be leveraged for checking cases such as scaling up and down a deployment or STS and any // other scalable resources. 
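The watcher added above is reached through Resources.Watch and configured with the With*Func builders before Start is called. A minimal sketch, assuming a populated *resources.Resources and a placeholder label selector:

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
)

// watchPods streams add/delete events for labelled pods until ctx is done.
func watchPods(ctx context.Context, r *resources.Resources) error {
	w := r.Watch(&v1.PodList{}, resources.WithLabelSelector("app=demo")).
		WithAddFunc(func(obj interface{}) { fmt.Println("pod added") }).
		WithDeleteFunc(func(obj interface{}) { fmt.Println("pod deleted") })
	if err := w.Start(ctx); err != nil {
		return err
	}
	defer w.Stop()
	<-ctx.Done() // in a real test, assertions would run here while events stream in
	return nil
}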
-func (c *Condition) ResourceScaled(obj k8s.Object, scaleFetcher func(object k8s.Object) int32, replica int32) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { +func (c *Condition) ResourceScaled(obj k8s.Object, scaleFetcher func(object k8s.Object) int32, replica int32) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { log.V(4).InfoS("Checking for resource to be scaled", "resource", c.namespacedName(obj), "replica", replica) - if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); err != nil { + if err := c.resources.Get(ctx, obj.GetName(), obj.GetNamespace(), obj); err != nil { return false, nil } return scaleFetcher(obj) == replica, nil @@ -62,9 +63,9 @@ func (c *Condition) ResourceScaled(obj k8s.Object, scaleFetcher func(object k8s. // ResourceMatch is a helper function used to check if the resource under question has met a pre-defined state. This can // be leveraged for checking fields on a resource that may not be immediately present upon creation. -func (c *Condition) ResourceMatch(obj k8s.Object, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { - if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); err != nil { +func (c *Condition) ResourceMatch(obj k8s.Object, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { + if err := c.resources.Get(ctx, obj.GetName(), obj.GetNamespace(), obj); err != nil { return false, nil } return matchFetcher(obj), nil @@ -73,15 +74,15 @@ func (c *Condition) ResourceMatch(obj k8s.Object, matchFetcher func(object k8s.O // ResourceListN is a helper function that can be used to check for a minimum number of returned objects in a list. This function // accepts list options that can be used to adjust the set of objects queried for in the List resource operation. -func (c *Condition) ResourceListN(list k8s.ObjectList, n int, listOptions ...resources.ListOption) apimachinerywait.ConditionFunc { +func (c *Condition) ResourceListN(list k8s.ObjectList, n int, listOptions ...resources.ListOption) apimachinerywait.ConditionWithContextFunc { return c.ResourceListMatchN(list, n, func(object k8s.Object) bool { return true }, listOptions...) } // ResourceListMatchN is a helper function that can be used to check for a minimum number of returned objects in a list. This function // accepts list options and a match function that can be used to adjust the set of objects queried for in the List resource operation. -func (c *Condition) ResourceListMatchN(list k8s.ObjectList, n int, matchFetcher func(object k8s.Object) bool, listOptions ...resources.ListOption) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { - if err := c.resources.List(context.TODO(), list, listOptions...); err != nil { +func (c *Condition) ResourceListMatchN(list k8s.ObjectList, n int, matchFetcher func(object k8s.Object) bool, listOptions ...resources.ListOption) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { + if err = c.resources.List(ctx, list, listOptions...); err != nil { return false, nil } var found int @@ -102,22 +103,22 @@ func (c *Condition) ResourceListMatchN(list k8s.ObjectList, n int, matchFetcher // ResourcesFound is a helper function that can be used to check for a set of objects. 
This function accepts a list // of named objects and will wait until it is able to retrieve each. -func (c *Condition) ResourcesFound(list k8s.ObjectList) apimachinerywait.ConditionFunc { +func (c *Condition) ResourcesFound(list k8s.ObjectList) apimachinerywait.ConditionWithContextFunc { return c.ResourcesMatch(list, func(object k8s.Object) bool { return true }) } // ResourcesMatch is a helper function that can be used to check for a set of objects. This function accepts a list // of named objects and a match function, and will wait until it is able to retrieve each while passing the match validation. -func (c *Condition) ResourcesMatch(list k8s.ObjectList, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionFunc { +func (c *Condition) ResourcesMatch(list k8s.ObjectList, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionWithContextFunc { metaList, err := meta.ExtractList(list) if err != nil { - return func() (done bool, err error) { return false, err } + return func(ctx context.Context) (done bool, err error) { return false, err } } objects := make(map[k8s.Object]bool) for _, o := range metaList { obj, ok := o.(k8s.Object) if !ok { - return func() (done bool, err error) { + return func(ctx context.Context) (done bool, err error) { return false, fmt.Errorf("condition: unexpected type %T in list, does not satisfy k8s.Object", obj) } } @@ -125,11 +126,11 @@ func (c *Condition) ResourcesMatch(list k8s.ObjectList, matchFetcher func(object objects[obj] = false } } - return func() (done bool, err error) { + return func(ctx context.Context) (done bool, err error) { found := 0 for obj, created := range objects { if !created { - if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { + if err := c.resources.Get(ctx, obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { continue } else if err != nil { return false, err @@ -147,16 +148,16 @@ func (c *Condition) ResourcesMatch(list k8s.ObjectList, matchFetcher func(object // ResourcesDeleted is a helper function that can be used to check for if a set of objects has been deleted. This function // accepts a list of named objects and will wait until it is not able to find each. 
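Because the condition helpers now return ConditionWithContextFunc, they plug straight into the context-aware wait.For shown later in this patch. A hedged sketch of ResourceMatch checking deployment readiness; wait.WithTimeout is assumed to be the package's existing timeout option.

package example

import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/e2e-framework/klient/k8s"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
)

// waitForDeploymentReady blocks until the deployment reports as many ready
// replicas as it requests, or until the (assumed) timeout option expires.
func waitForDeploymentReady(r *resources.Resources, name, namespace string) error {
	dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
	return wait.For(
		conditions.New(r).ResourceMatch(dep, func(object k8s.Object) bool {
			d, ok := object.(*appsv1.Deployment)
			return ok && d.Spec.Replicas != nil && d.Status.ReadyReplicas == *d.Spec.Replicas
		}),
		wait.WithTimeout(2*time.Minute),
	)
}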
-func (c *Condition) ResourcesDeleted(list k8s.ObjectList) apimachinerywait.ConditionFunc { +func (c *Condition) ResourcesDeleted(list k8s.ObjectList) apimachinerywait.ConditionWithContextFunc { metaList, err := meta.ExtractList(list) if err != nil { - return func() (done bool, err error) { return false, err } + return func(ctx context.Context) (done bool, err error) { return false, err } } objects := make(map[k8s.Object]bool) for _, o := range metaList { obj, ok := o.(k8s.Object) if !ok { - return func() (done bool, err error) { + return func(ctx context.Context) (done bool, err error) { return false, fmt.Errorf("condition: unexpected type %T in list, does not satisfy k8s.Object", obj) } } @@ -164,10 +165,11 @@ func (c *Condition) ResourcesDeleted(list k8s.ObjectList) apimachinerywait.Condi objects[obj] = true } } - return func() (done bool, err error) { + return func(ctx context.Context) (done bool, err error) { for obj, created := range objects { if created { - if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { + log.V(4).InfoS("Checking for resource to be garbage collected", "resource", c.namespacedName(obj)) + if err := c.resources.Get(ctx, obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { delete(objects, obj) } else if err != nil { return false, err @@ -184,8 +186,8 @@ func (c *Condition) ResourcesDeleted(list k8s.ObjectList) apimachinerywait.Condi // // This method can be leveraged against any Kubernetes resource to check the deletion workflow and it does so by // checking the resource and waiting until it obtains a v1.StatusReasonNotFound error from the API -func (c *Condition) ResourceDeleted(obj k8s.Object) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { +func (c *Condition) ResourceDeleted(obj k8s.Object) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { log.V(4).InfoS("Checking for resource to be garbage collected", "resource", c.namespacedName(obj)) if err := c.resources.Get(context.Background(), obj.GetName(), obj.GetNamespace(), obj); err != nil { if errors.IsNotFound(err) { @@ -200,10 +202,10 @@ func (c *Condition) ResourceDeleted(obj k8s.Object) apimachinerywait.ConditionFu // JobConditionMatch is a helper function that can be used to check the Job Completion or runtime status against a // specific condition. This function accepts both conditionType and conditionState as argument and hence you can use this // to match both positive or negative cases with suitable values passed to the arguments. 
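ResourceDeleted above pairs naturally with Resources.Delete for teardown checks. A hedged sketch under the new context-based API, with the polling interval chosen arbitrarily:

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
)

// deleteAndWait issues the delete and then polls until the object is gone.
func deleteAndWait(ctx context.Context, r *resources.Resources, pod *v1.Pod) error {
	if err := r.Delete(ctx, pod); err != nil {
		return err
	}
	return wait.For(
		conditions.New(r).ResourceDeleted(pod),
		wait.WithInterval(2*time.Second),
		wait.WithContext(ctx),
	)
}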
-func (c *Condition) JobConditionMatch(job k8s.Object, conditionType batchv1.JobConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { +func (c *Condition) JobConditionMatch(job k8s.Object, conditionType batchv1.JobConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { log.V(4).InfoS("Checking for condition match", "resource", c.namespacedName(job), "state", conditionState, "conditionType", conditionType) - if err := c.resources.Get(context.TODO(), job.GetName(), job.GetNamespace(), job); err != nil { + if err := c.resources.Get(ctx, job.GetName(), job.GetNamespace(), job); err != nil { return false, err } status := job.(*batchv1.Job).Status @@ -218,9 +220,9 @@ func (c *Condition) JobConditionMatch(job k8s.Object, conditionType batchv1.JobC } // DeploymentConditionMatch is a helper function that can be used to check a specific condition match for the Deployment in question. -func (c *Condition) DeploymentConditionMatch(deployment k8s.Object, conditionType appsv1.DeploymentConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { - if err := c.resources.Get(context.TODO(), deployment.GetName(), deployment.GetNamespace(), deployment); err != nil { +func (c *Condition) DeploymentConditionMatch(deployment k8s.Object, conditionType appsv1.DeploymentConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { + if err := c.resources.Get(ctx, deployment.GetName(), deployment.GetNamespace(), deployment); err != nil { return false, err } for _, cond := range deployment.(*appsv1.Deployment).Status.Conditions { @@ -234,10 +236,10 @@ func (c *Condition) DeploymentConditionMatch(deployment k8s.Object, conditionTyp // PodConditionMatch is a helper function that can be used to check a specific condition match for the Pod in question. // This is extended into a few simplified match helpers such as PodReady and ContainersReady as well. -func (c *Condition) PodConditionMatch(pod k8s.Object, conditionType v1.PodConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { +func (c *Condition) PodConditionMatch(pod k8s.Object, conditionType v1.PodConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { log.V(4).InfoS("Checking for condition match", "resource", c.namespacedName(pod), "state", conditionState, "conditionType", conditionType) - if err := c.resources.Get(context.TODO(), pod.GetName(), pod.GetNamespace(), pod); err != nil { + if err := c.resources.Get(ctx, pod.GetName(), pod.GetNamespace(), pod); err != nil { return false, err } status := pod.(*v1.Pod).Status @@ -254,8 +256,8 @@ func (c *Condition) PodConditionMatch(pod k8s.Object, conditionType v1.PodCondit // PodPhaseMatch is a helper function that is used to check and see if the Pod Has reached a specific Phase of the // runtime. This can be combined with PodConditionMatch to check if a specific condition and phase has been met. // This will enable validation such as checking against CLB of a POD. 
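These condition helpers no longer accept the removed stop-channel mechanism; cancellation and deadlines now flow through a context passed via the new wait.WithContext option shown further down in this patch. A hedged migration sketch for a pod-readiness check:

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
)

// waitForPodReady shows the context-based flow that replaces WithStopChannel:
// the same ctx bounds both the polling loop and the Get calls inside it.
func waitForPodReady(r *resources.Resources, pod *v1.Pod) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	return wait.For(
		conditions.New(r).PodConditionMatch(pod, v1.PodReady, v1.ConditionTrue),
		wait.WithContext(ctx),
	)
}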
-func (c *Condition) PodPhaseMatch(pod k8s.Object, phase v1.PodPhase) apimachinerywait.ConditionFunc { - return func() (done bool, err error) { +func (c *Condition) PodPhaseMatch(pod k8s.Object, phase v1.PodPhase) apimachinerywait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { log.V(4).InfoS("Checking for phase match", "resource", c.namespacedName(pod), "phase", phase) if err := c.resources.Get(context.Background(), pod.GetName(), pod.GetNamespace(), pod); err != nil { return false, err @@ -266,28 +268,38 @@ func (c *Condition) PodPhaseMatch(pod k8s.Object, phase v1.PodPhase) apimachiner } // PodReady is a helper function used to check if the pod condition v1.PodReady has reached v1.ConditionTrue state -func (c *Condition) PodReady(pod k8s.Object) apimachinerywait.ConditionFunc { +func (c *Condition) PodReady(pod k8s.Object) apimachinerywait.ConditionWithContextFunc { return c.PodConditionMatch(pod, v1.PodReady, v1.ConditionTrue) } // ContainersReady is a helper function used to check if the pod condition v1.ContainersReady has reached v1.ConditionTrue -func (c *Condition) ContainersReady(pod k8s.Object) apimachinerywait.ConditionFunc { +func (c *Condition) ContainersReady(pod k8s.Object) apimachinerywait.ConditionWithContextFunc { return c.PodConditionMatch(pod, v1.ContainersReady, v1.ConditionTrue) } // PodRunning is a helper function used to check if the pod.Status.Phase attribute of the Pod has reached v1.PodRunning -func (c *Condition) PodRunning(pod k8s.Object) apimachinerywait.ConditionFunc { +func (c *Condition) PodRunning(pod k8s.Object) apimachinerywait.ConditionWithContextFunc { return c.PodPhaseMatch(pod, v1.PodRunning) } // JobCompleted is a helper function used to check if the Job has been completed successfully by checking if the // batchv1.JobCompleted has reached the v1.ConditionTrue state -func (c *Condition) JobCompleted(job k8s.Object) apimachinerywait.ConditionFunc { +func (c *Condition) JobCompleted(job k8s.Object) apimachinerywait.ConditionWithContextFunc { return c.JobConditionMatch(job, batchv1.JobComplete, v1.ConditionTrue) } // JobFailed is a helper function used to check if the Job has failed by checking if the batchv1.JobFailed has reached // v1.ConditionTrue state -func (c *Condition) JobFailed(job k8s.Object) apimachinerywait.ConditionFunc { +func (c *Condition) JobFailed(job k8s.Object) apimachinerywait.ConditionWithContextFunc { return c.JobConditionMatch(job, batchv1.JobFailed, v1.ConditionTrue) } + +// DeploymentAvailable is a helper function used to check if the deployment condition appsv1.DeploymentAvailable +// has reached v1.ConditionTrue state +func (c *Condition) DeploymentAvailable(name, namespace string) apimachinerywait.ConditionWithContextFunc { + return c.DeploymentConditionMatch( + &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}, + appsv1.DeploymentAvailable, + v1.ConditionTrue, + ) +} diff --git a/vendor/sigs.k8s.io/e2e-framework/klient/wait/wait.go b/vendor/sigs.k8s.io/e2e-framework/klient/wait/wait.go index 4b219f34d..930f97947 100644 --- a/vendor/sigs.k8s.io/e2e-framework/klient/wait/wait.go +++ b/vendor/sigs.k8s.io/e2e-framework/klient/wait/wait.go @@ -17,6 +17,7 @@ limitations under the License. package wait import ( + "context" "time" apimachinerywait "k8s.io/apimachinery/pkg/util/wait" @@ -34,7 +35,7 @@ type Options struct { // to be met. 
Timeout time.Duration // StopChan is used to setup a wait mechanism using the apimachinerywait.PollUntil method - StopChan chan struct{} + Ctx context.Context // Immediate is used to indicate if the apimachinerywait's immediate wait method are to be // called instead of the regular one Immediate bool @@ -59,13 +60,12 @@ func WithInterval(interval time.Duration) Option { } } -// WithStopChannel provides a way to configure a Stop channel that can be used to run wait condition checks -// either until the condition has been successfully met or until the channel has been closed. This will enable +// WithContext provides a way to configure a context that can be used to cancel the wait condition checks. This will enable // end users to write test in cases where the max timeout is not really predictable or is a factor of a different // configuration or event. -func WithStopChannel(stopChan chan struct{}) Option { +func WithContext(ctx context.Context) Option { return func(options *Options) { - options.StopChan = stopChan + options.Ctx = ctx } } @@ -84,28 +84,24 @@ func WithImmediate() Option { // The conditions sub-packages provides a series of pre-defined wait functions that can be used by the developers // or a custom wait function can be passed as an argument to get a similar functionality if the check required // for your test is not already provided by the helper utility. -func For(conditionFunc apimachinerywait.ConditionFunc, opts ...Option) error { +func For(conditionFunc apimachinerywait.ConditionWithContextFunc, opts ...Option) error { options := &Options{ Interval: defaultPollInterval, Timeout: defaultPollTimeout, - StopChan: nil, + Ctx: nil, Immediate: false, } - + var cancel context.CancelFunc for _, fn := range opts { fn(options) } - // Setting the options.StopChan will force the usage of `PollUntil` - if options.StopChan != nil { - if options.Immediate { - return apimachinerywait.PollImmediateUntil(options.Interval, conditionFunc, options.StopChan) - } - return apimachinerywait.PollUntil(options.Interval, conditionFunc, options.StopChan) + if options.Ctx == nil { + options.Ctx, cancel = context.WithTimeout(context.Background(), options.Timeout) + defer cancel() } - if options.Immediate { - return apimachinerywait.PollImmediate(options.Interval, options.Timeout, conditionFunc) + return apimachinerywait.PollUntilContextCancel(options.Ctx, options.Interval, true, conditionFunc) } - return apimachinerywait.Poll(options.Interval, options.Timeout, conditionFunc) + return apimachinerywait.PollUntilContextCancel(options.Ctx, options.Interval, false, conditionFunc) } diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/env/action.go b/vendor/sigs.k8s.io/e2e-framework/pkg/env/action.go index 25f707fa6..1d236c7a3 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/env/action.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/env/action.go @@ -21,12 +21,15 @@ import ( "fmt" "testing" + "k8s.io/klog/v2" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/internal/types" ) +type actionRole uint8 + const ( - roleSetup = iota + roleSetup actionRole = iota roleBeforeTest roleBeforeFeature roleAfterFeature @@ -34,6 +37,25 @@ const ( roleFinish ) +func (r actionRole) String() string { + switch r { + case roleSetup: + return "Setup" + case roleBeforeTest: + return "BeforeEachTest" + case roleBeforeFeature: + return "BeforeEachFeature" + case roleAfterFeature: + return "AfterEachFeature" + case roleAfterTest: + return "AfterEachTest" + case roleFinish: + return "Finish" + default: + 
panic("unknown role") // this should never happen + } +} + // action a group env functions type action struct { role actionRole @@ -52,6 +74,10 @@ type action struct { func (a *action) runWithT(ctx context.Context, cfg *envconf.Config, t *testing.T) (context.Context, error) { switch a.role { case roleBeforeTest, roleAfterTest: + if cfg.DryRunMode() { + klog.V(2).Info("Skipping execution of roleBeforeTest and roleAfterTest due to framework being in dry-run mode") + return ctx, nil + } for _, f := range a.testFuncs { if f == nil { continue @@ -74,6 +100,10 @@ func (a *action) runWithT(ctx context.Context, cfg *envconf.Config, t *testing.T func (a *action) runWithFeature(ctx context.Context, cfg *envconf.Config, t *testing.T, fi types.Feature) (context.Context, error) { switch a.role { case roleBeforeFeature, roleAfterFeature: + if cfg.DryRunMode() { + klog.V(2).Info("Skipping execution of roleBeforeFeature and roleAfterFeature due to framework being in dry-run mode") + return ctx, nil + } for _, f := range a.featureFuncs { if f == nil { continue @@ -92,6 +122,10 @@ func (a *action) runWithFeature(ctx context.Context, cfg *envconf.Config, t *tes } func (a *action) run(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + if cfg.DryRunMode() { + klog.V(2).InfoS("Skipping processing of action due to framework being in dry-run mode") + return ctx, nil + } for _, f := range a.funcs { if f == nil { continue diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/env/env.go b/vendor/sigs.k8s.io/e2e-framework/pkg/env/env.go index 1d3119a93..72e1a0511 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/env/env.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/env/env.go @@ -21,12 +21,12 @@ package env import ( "context" "fmt" - "math/rand" + "regexp" + "runtime/debug" "sync" "testing" - "time" - log "k8s.io/klog/v2" + "k8s.io/klog/v2" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" @@ -37,15 +37,13 @@ type ( Environment = types.Environment Func = types.EnvFunc FeatureFunc = types.FeatureEnvFunc - - actionRole uint8 + TestFunc = types.TestEnvFunc ) type testEnv struct { ctx context.Context cfg *envconf.Config actions []action - rnd rand.Source } // New creates a test environment with no config attached. @@ -64,6 +62,15 @@ func NewWithConfig(cfg *envconf.Config) types.Environment { return env } +// NewFromFlags creates a test environment using configuration values from CLI flags +func NewFromFlags() (types.Environment, error) { + cfg, err := envconf.NewFromFlags() + if err != nil { + return nil, err + } + return NewWithConfig(cfg), nil +} + // NewWithKubeConfig creates an environment using an Environment Configuration value // and the given kubeconfig. 
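The new NewFromFlags constructor builds the environment from the framework's CLI flags, and Test/TestInParallel now return the final context.Context so callers can chain values between features. A minimal TestMain sketch using only APIs visible in this patch:

package example

import (
	"os"
	"testing"

	"k8s.io/klog/v2"
	"sigs.k8s.io/e2e-framework/pkg/env"
)

var testenv env.Environment

func TestMain(m *testing.M) {
	var err error
	// Build the environment from the framework's CLI flags (kubeconfig,
	// labels, dry-run, fail-fast, ...) instead of assembling a config by hand.
	testenv, err = env.NewFromFlags()
	if err != nil {
		klog.Fatalf("failed to build environment from flags: %v", err)
	}
	os.Exit(testenv.Run(m))
}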
func NewWithKubeConfig(kubeconfigfile string) types.Environment { @@ -97,7 +104,6 @@ func newTestEnv() *testEnv { return &testEnv{ ctx: context.Background(), cfg: envconf.New(), - rnd: rand.NewSource(time.Now().UnixNano()), } } @@ -182,40 +188,44 @@ func (e *testEnv) panicOnMissingContext() { // processTestActions is used to run a series of test action that were configured as // BeforeEachTest or AfterEachTest -func (e *testEnv) processTestActions(t *testing.T, actions []action) { +func (e *testEnv) processTestActions(ctx context.Context, t *testing.T, actions []action) context.Context { var err error + out := ctx for _, action := range actions { - if e.ctx, err = action.runWithT(e.ctx, e.cfg, t); err != nil { - t.Fatalf("BeforeEachTest failure: %s", err) + out, err = action.runWithT(ctx, e.cfg, t) + if err != nil { + t.Fatalf("%s failure: %s", action.role, err) } } + return out } // processTestFeature is used to trigger the execution of the actual feature. This function wraps the entire // workflow of orchestrating the feature execution be running the action configured by BeforeEachFeature / // AfterEachFeature. -func (e *testEnv) processTestFeature(t *testing.T, featureName string, feature types.Feature) { - var err error - - // execute each feature - beforeFeatureActions := e.getBeforeFeatureActions() - afterFeatureActions := e.getAfterFeatureActions() - - for _, action := range beforeFeatureActions { - if e.ctx, err = action.runWithFeature(e.ctx, e.cfg, t, deepCopyFeature(feature)); err != nil { - t.Fatalf("BeforeEachTest failure: %s", err) - } - } +func (e *testEnv) processTestFeature(ctx context.Context, t *testing.T, featureName string, feature types.Feature) context.Context { + // execute beforeEachFeature actions + ctx = e.processFeatureActions(ctx, t, feature, e.getBeforeFeatureActions()) // execute feature test - e.ctx = e.execFeature(e.ctx, t, featureName, feature) + ctx = e.execFeature(ctx, t, featureName, feature) - // execute beforeFeature actions - for _, action := range afterFeatureActions { - if e.ctx, err = action.runWithFeature(e.ctx, e.cfg, t, deepCopyFeature(feature)); err != nil { - t.Fatalf("BeforeEachTest failure: %s", err) + // execute afterEachFeature actions + return e.processFeatureActions(ctx, t, feature, e.getAfterFeatureActions()) +} + +// processFeatureActions is used to run a series of feature action that were configured as +// BeforeEachFeature or AfterEachFeature +func (e *testEnv) processFeatureActions(ctx context.Context, t *testing.T, feature types.Feature, actions []action) context.Context { + var err error + out := ctx + for _, action := range actions { + out, err = action.runWithFeature(out, e.cfg, t, deepCopyFeature(feature)) + if err != nil { + t.Fatalf("%s failure: %s", action.role, err) } } + return out } // processTests is a wrapper function that can be invoked by either Test or TestInParallel methods. @@ -224,43 +234,54 @@ func (e *testEnv) processTestFeature(t *testing.T, featureName string, feature t // // In case if the parallel run of test features are enabled, this function will invoke the processTestFeature // as a go-routine to get them to run in parallel -func (e *testEnv) processTests(t *testing.T, enableParallelRun bool, testFeatures ...types.Feature) { - e.panicOnMissingContext() +func (e *testEnv) processTests(ctx context.Context, t *testing.T, enableParallelRun bool, testFeatures ...types.Feature) context.Context { + if e.cfg.DryRunMode() { + klog.V(2).Info("e2e-framework is being run in dry-run mode. 
This will skip all the before/after step functions configured around your test assessments and features") + } + if ctx == nil { + panic("nil context") // this should never happen + } if len(testFeatures) == 0 { t.Log("No test testFeatures provided, skipping test") - return + return ctx } beforeTestActions := e.getBeforeTestActions() afterTestActions := e.getAfterTestActions() - e.processTestActions(t, beforeTestActions) - runInParallel := e.cfg.ParallelTestEnabled() && enableParallelRun if runInParallel { - log.V(4).Info("Running test features in parallel") + klog.V(4).Info("Running test features in parallel") } + ctx = e.processTestActions(ctx, t, beforeTestActions) + var wg sync.WaitGroup for i, feature := range testFeatures { + featureCopy := feature featName := feature.Name() if featName == "" { featName = fmt.Sprintf("Feature-%d", i+1) } if runInParallel { wg.Add(1) - go func(w *sync.WaitGroup) { + go func(ctx context.Context, w *sync.WaitGroup, featName string, f types.Feature) { defer w.Done() - e.processTestFeature(t, featName, feature) - }(&wg) + _ = e.processTestFeature(ctx, t, featName, f) + }(ctx, &wg, featName, featureCopy) } else { - e.processTestFeature(t, featName, feature) + ctx = e.processTestFeature(ctx, t, featName, featureCopy) + // In case if the feature under test has failed, skip reset of the features + // that are part of the same test + if e.cfg.FailFast() && t.Failed() { + break + } } } if runInParallel { wg.Wait() } - e.processTestActions(t, afterTestActions) + return e.processTestActions(ctx, t, afterTestActions) } // TestInParallel executes a series a feature tests from within a @@ -281,8 +302,8 @@ func (e *testEnv) processTests(t *testing.T, enableParallelRun bool, testFeature // set of features being passed to this call while the feature themselves // are executed in parallel to avoid duplication of action that might happen // in BeforeTest and AfterTest actions -func (e *testEnv) TestInParallel(t *testing.T, testFeatures ...types.Feature) { - e.processTests(t, true, testFeatures...) +func (e *testEnv) TestInParallel(t *testing.T, testFeatures ...types.Feature) context.Context { + return e.processTests(e.ctx, t, true, testFeatures...) } // Test executes a feature test from within a TestXXX function. @@ -297,8 +318,8 @@ func (e *testEnv) TestInParallel(t *testing.T, testFeatures ...types.Feature) { // // BeforeTest and AfterTest operations are executed before and after // the feature is tested respectively. -func (e *testEnv) Test(t *testing.T, testFeatures ...types.Feature) { - e.processTests(t, false, testFeatures...) +func (e *testEnv) Test(t *testing.T, testFeatures ...types.Feature) context.Context { + return e.processTests(e.ctx, t, false, testFeatures...) } // Finish registers funcs that are executed at the end of the @@ -317,35 +338,47 @@ func (e *testEnv) Finish(funcs ...Func) types.Environment { // package. This method will all Env.Setup operations prior to // starting the tests and run all Env.Finish operations after // before completing the suite. -// func (e *testEnv) Run(m *testing.M) int { - if e.ctx == nil { - panic("context not set") // something is terribly wrong. - } + e.panicOnMissingContext() + ctx := e.ctx setups := e.getSetupActions() // fail fast on setup, upon err exit var err error - for _, setup := range setups { - // context passed down to each setup - if e.ctx, err = setup.run(e.ctx, e.cfg); err != nil { - log.Fatal(err) + + defer func() { + // Recover and see if the panic handler is disabled. 
If it is disabled, panic and stop the workflow. + // Otherwise, log and continue with running the Finish steps of the Test suite + rErr := recover() + if rErr != nil { + if e.cfg.DisableGracefulTeardown() { + panic(rErr) + } + klog.Errorf("Recovering from panic and running finish actions: %s, stack: %s", rErr, string(debug.Stack())) } - } - exitCode := m.Run() // exec test suite + finishes := e.getFinishActions() + // attempt to gracefully clean up. + // Upon error, log and continue. + for _, fin := range finishes { + // context passed down to each finish step + if ctx, err = fin.run(ctx, e.cfg); err != nil { + klog.V(2).ErrorS(err, "Cleanup failed", "action", fin.role) + } + } + e.ctx = ctx + }() - finishes := e.getFinishActions() - // attempt to gracefully clean up. - // Upon error, log and continue. - for _, fin := range finishes { - // context passed down to each finish step - if e.ctx, err = fin.run(e.ctx, e.cfg); err != nil { - log.V(2).ErrorS(err, "Finish action handlers") + for _, setup := range setups { + // context passed down to each setup + if ctx, err = setup.run(ctx, e.cfg); err != nil { + klog.Fatalf("%s failure: %s", setup.role, err) } } + e.ctx = ctx - return exitCode + // Execute the test suite + return m.Run() } func (e *testEnv) getActionsByRole(r actionRole) []action { @@ -387,78 +420,156 @@ func (e *testEnv) getFinishActions() []action { return e.getActionsByRole(roleFinish) } +func (e *testEnv) executeSteps(ctx context.Context, t *testing.T, steps []types.Step) context.Context { + if e.cfg.DryRunMode() { + return ctx + } + for _, setup := range steps { + ctx = setup.Func()(ctx, t, e.cfg) + } + return ctx +} + func (e *testEnv) execFeature(ctx context.Context, t *testing.T, featName string, f types.Feature) context.Context { // feature-level subtest - t.Run(featName, func(t *testing.T) { - // skip feature which matches with --skip-feature - if e.cfg.SkipFeatureRegex() != nil && e.cfg.SkipFeatureRegex().MatchString(featName) { - t.Skipf(`Skipping feature "%s": name matched`, featName) - } - - // skip feature which does not match with --feature - if e.cfg.FeatureRegex() != nil && !e.cfg.FeatureRegex().MatchString(featName) { - t.Skipf(`Skipping feature "%s": name not matched`, featName) - } - - // skip if labels does not match - // run tests if --labels values matches the feature labels - for k, v := range e.cfg.Labels() { - if f.Labels()[k] != v { - t.Skipf(`Skipping feature "%s": unmatched label "%s=%s"`, featName, k, f.Labels()[k]) - } + t.Run(featName, func(newT *testing.T) { + skipped, message := e.requireFeatureProcessing(f) + if skipped { + newT.Skipf(message) } - // skip running a feature if labels matches with --skip-labels - for k, v := range e.cfg.SkipLabels() { - if f.Labels()[k] == v { - t.Skipf(`Skipping feature "%s": matched label provided in --skip-lables "%s=%s"`, featName, k, f.Labels()[k]) - } + if fDescription, ok := f.(types.DescribableFeature); ok && fDescription.Description() != "" { + t.Logf("Processing Feature: %s", fDescription.Description()) } // setups run at feature-level setups := features.GetStepsByLevel(f.Steps(), types.LevelSetup) - for _, setup := range setups { - ctx = setup.Func()(ctx, t, e.cfg) - } + ctx = e.executeSteps(ctx, newT, setups) // assessments run as feature/assessment sub level assessments := features.GetStepsByLevel(f.Steps(), types.LevelAssess) + failed := false for i, assess := range assessments { assessName := assess.Name() + if dAssess, ok := assess.(types.DescribableStep); ok && dAssess.Description() != "" { + 
t.Logf("Processing Assessment: %s", dAssess.Description()) + } if assessName == "" { assessName = fmt.Sprintf("Assessment-%d", i+1) } - t.Run(assessName, func(t *testing.T) { - // skip assessments which matches with --skip-assessments - if e.cfg.SkipAssessmentRegex() != nil && e.cfg.SkipAssessmentRegex().MatchString(assess.Name()) { - t.Skipf(`Skipping assessment "%s": name matched`, assess.Name()) - } - - // skip assessments which does not matches with --assess - if e.cfg.AssessmentRegex() != nil && !e.cfg.AssessmentRegex().MatchString(assess.Name()) { - t.Skipf(`Skipping assessment "%s": name not matched`, assess.Name()) + newT.Run(assessName, func(internalT *testing.T) { + skipped, message := e.requireAssessmentProcessing(assess, i+1) + if skipped { + internalT.Skipf(message) } - ctx = assess.Func()(ctx, t, e.cfg) + ctx = e.executeSteps(ctx, internalT, []types.Step{assess}) }) + // Check if the Test assessment under question performed a `t.Fail()` or `t.Failed()` invocation. + // We need to track that and stop the next set of assessment in the feature under test from getting + // executed + if e.cfg.FailFast() && newT.Failed() { + failed = true + break + } + } + + // Let us fail the test fast and not run the teardown in case if the framework specific fail-fast mode is + // invoked to make sure we leave the traces of the failed test behind to enable better debugging for the + // test developers + if e.cfg.FailFast() && failed { + newT.FailNow() } // teardowns run at feature-level teardowns := features.GetStepsByLevel(f.Steps(), types.LevelTeardown) - for _, teardown := range teardowns { - ctx = teardown.Func()(ctx, t, e.cfg) - } + ctx = e.executeSteps(ctx, newT, teardowns) }) return ctx } +// requireFeatureProcessing is a wrapper around the requireProcessing function to process the feature level validation +func (e *testEnv) requireFeatureProcessing(f types.Feature) (skip bool, message string) { + requiredRegexp := e.cfg.FeatureRegex() + skipRegexp := e.cfg.SkipFeatureRegex() + return e.requireProcessing("feature", f.Name(), requiredRegexp, skipRegexp, f.Labels()) +} + +// requireAssessmentProcessing is a wrapper around the requireProcessing function to process the Assessment level validation +func (e *testEnv) requireAssessmentProcessing(a types.Step, assessmentIndex int) (skip bool, message string) { + requiredRegexp := e.cfg.AssessmentRegex() + skipRegexp := e.cfg.SkipAssessmentRegex() + assessmentName := a.Name() + if assessmentName == "" { + assessmentName = fmt.Sprintf("Assessment-%d", assessmentIndex) + } + return e.requireProcessing("assessment", assessmentName, requiredRegexp, skipRegexp, nil) +} + +// requireProcessing is a utility function that can be used to make a decision on if a specific Test assessment or feature needs to be +// processed or not. +// testName argument indicate the Feature Name or test Name that can be mapped against the skip or include regex flags +// to decide if the entity in question will need processing. 
+// This function also perform a label check against include/skip labels to make sure only those features to make sure +// we can filter out all the non-required features during the test execution +func (e *testEnv) requireProcessing(kind, testName string, requiredRegexp, skipRegexp *regexp.Regexp, labels types.Labels) (skip bool, message string) { + if requiredRegexp != nil && !requiredRegexp.MatchString(testName) { + skip = true + message = fmt.Sprintf(`Skipping %s "%s": name not matched`, kind, testName) + return skip, message + } + if skipRegexp != nil && skipRegexp.MatchString(testName) { + skip = true + message = fmt.Sprintf(`Skipping %s: "%s": name matched`, kind, testName) + return skip, message + } + + if labels != nil { + // only run a feature if all its label keys and values match those specified + // with --labels + matches := 0 + for key, vals := range e.cfg.Labels() { + for _, v := range vals { + if labels.Contains(key, v) { + matches++ + break // continue with next key + } + } + } + + if len(e.cfg.Labels()) != matches { + skip = true + var kvs []string + for k, v := range labels { + kvs = append(kvs, fmt.Sprintf("%s=%s", k, v)) // prettify output + } + message = fmt.Sprintf(`Skipping feature "%s": unmatched labels "%s"`, testName, kvs) + return skip, message + } + + // skip running a feature if labels matches with --skip-labels + for key, vals := range e.cfg.SkipLabels() { + for _, v := range vals { + if labels.Contains(key, v) { + skip = true + message = fmt.Sprintf(`Skipping feature "%s": matched label provided in --skip-lables "%s=%s"`, testName, key, labels[key]) + return skip, message + } + } + } + } + return skip, message +} + // deepCopyFeature just copies the values from the Feature but creates a deep // copy to avoid mutation when we just want an informational copy. 
func deepCopyFeature(f types.Feature) types.Feature { fcopy := features.New(f.Name()) - for k, v := range f.Labels() { - fcopy = fcopy.WithLabel(k, v) + for k, vals := range f.Labels() { + for _, v := range vals { + fcopy = fcopy.WithLabel(k, v) + } } f.Steps() for _, step := range f.Steps() { diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/envconf/config.go b/vendor/sigs.k8s.io/e2e-framework/pkg/envconf/config.go index 4b154487c..1534c5646 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/envconf/config.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/envconf/config.go @@ -31,16 +31,20 @@ import ( // Config represents and environment configuration type Config struct { - client klient.Client - kubeconfig string - namespace string - assessmentRegex *regexp.Regexp - featureRegex *regexp.Regexp - labels map[string]string - skipFeatureRegex *regexp.Regexp - skipLabels map[string]string - skipAssessmentRegex *regexp.Regexp - parallelTests bool + client klient.Client + kubeconfig string + namespace string + assessmentRegex *regexp.Regexp + featureRegex *regexp.Regexp + labels flags.LabelsMap + skipFeatureRegex *regexp.Regexp + skipLabels flags.LabelsMap + skipAssessmentRegex *regexp.Regexp + parallelTests bool + dryRun bool + failFast bool + disableGracefulTeardown bool + kubeContext string } // New creates and initializes an empty environment configuration @@ -79,6 +83,10 @@ func NewFromFlags() (*Config, error) { } e.skipLabels = envFlags.SkipLabels() e.parallelTests = envFlags.Parallel() + e.dryRun = envFlags.DryRun() + e.failFast = envFlags.FailFast() + e.disableGracefulTeardown = envFlags.DisableGracefulTeardown() + e.kubeContext = envFlags.KubeContext() return e, nil } @@ -112,12 +120,13 @@ func (c *Config) NewClient() (klient.Client, error) { return nil, fmt.Errorf("envconfig: client failed: %w", err) } c.client = client + return c.client, nil } // Client is a constructor function that returns a previously -// created klient.Client or create a new one based on configuration -// previously set. Willpanic on any error so it recommended that you +// created klient.Client or creates a new one based on configuration +// previously set. Will panic on any error so it is recommended that you // are confident in the configuration or call NewClient() to ensure its // safe creation. 
func (c *Config) Client() klient.Client { @@ -196,36 +205,88 @@ func (c *Config) SkipFeatureRegex() *regexp.Regexp { } // WithLabels sets the environment label filters -func (c *Config) WithLabels(lbls map[string]string) *Config { +func (c *Config) WithLabels(lbls map[string][]string) *Config { c.labels = lbls return c } // Labels returns the environment's label filters -func (c *Config) Labels() map[string]string { +func (c *Config) Labels() map[string][]string { return c.labels } // WithSkipLabels sets the environment label filters -func (c *Config) WithSkipLabels(lbls map[string]string) *Config { +func (c *Config) WithSkipLabels(lbls map[string][]string) *Config { c.skipLabels = lbls return c } // SkipLabels returns the environment's label filters -func (c *Config) SkipLabels() map[string]string { +func (c *Config) SkipLabels() map[string][]string { return c.skipLabels } +// WithParallelTestEnabled can be used to enable parallel run of the test +// features func (c *Config) WithParallelTestEnabled() *Config { c.parallelTests = true return c } +// ParallelTestEnabled indicates if the test features are being run in +// parallel or not func (c *Config) ParallelTestEnabled() bool { return c.parallelTests } +func (c *Config) WithDryRunMode() *Config { + c.dryRun = true + return c +} + +func (c *Config) DryRunMode() bool { + return c.dryRun +} + +// WithFailFast can be used to enable framework specific fail fast mode +// that controls the test execution of the features and assessments under +// test +func (c *Config) WithFailFast() *Config { + c.failFast = true + return c +} + +// FailFast indicate if the framework is running in fail fast mode. This +// controls the behavior of how the assessments and features are handled +// if a test encounters a failure result +func (c *Config) FailFast() bool { + return c.failFast +} + +// WithDisableGracefulTeardown can be used to programmatically disabled the panic +// recovery enablement on test startup. This will prevent test Finish steps +// from being executed on panic +func (c *Config) WithDisableGracefulTeardown() *Config { + c.disableGracefulTeardown = true + return c +} + +// DisableGracefulTeardown is used to check the panic recovery handler should be enabled +func (c *Config) DisableGracefulTeardown() bool { + return c.disableGracefulTeardown +} + +// WithKubeContext is used to set the kubeconfig context +func (c *Config) WithKubeContext(kubeContext string) *Config { + c.kubeContext = kubeContext + return c +} + +// WithKubeContext is used to get the kubeconfig context +func (c *Config) KubeContext() string { + return c.kubeContext +} + func randNS() string { return RandomName("testns-", 32) } diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/kind_funcs.go b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/kind_funcs.go index a70e563a8..66f621bb5 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/kind_funcs.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/kind_funcs.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,184 +18,38 @@ package envfuncs import ( "context" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/e2e-framework/klient" - "sigs.k8s.io/e2e-framework/klient/k8s/resources" - "sigs.k8s.io/e2e-framework/klient/wait" - "sigs.k8s.io/e2e-framework/klient/wait/conditions" "sigs.k8s.io/e2e-framework/pkg/env" - "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/support/kind" ) -type kindContextKey string - -// CreateKindCluster returns an env.Func that is used to -// create a kind cluster that is then injected in the context -// using the name as a key. -// -// NOTE: the returned function will update its env config with the -// kubeconfig file for the config client. -// -func CreateKindCluster(clusterName string) env.Func { - return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - k := kind.NewCluster(clusterName) - kubecfg, err := k.Create() - if err != nil { - return ctx, err - } - - // update envconfig with kubeconfig - cfg.WithKubeconfigFile(kubecfg) - - // stall, wait for pods initializations - if err := waitForControlPlane(cfg.Client()); err != nil { - return ctx, err - } - - // store entire cluster value in ctx for future access using the cluster name - return context.WithValue(ctx, kindContextKey(clusterName), k), nil +// Deprecated: This handler has been deprecated in favor of GetClusterFromContext +func GetKindClusterFromContext(ctx context.Context, clusterName string) (*kind.Cluster, bool) { + provider, ok := GetClusterFromContext(ctx, clusterName) + if ok { + return provider.(*kind.Cluster), ok } + return nil, ok } -// CreateKindClusterWithConfig returns an env.Func that is used to -// create a kind cluster that is then injected in the context -// using the name as a key. -// -// NOTE: the returned function will update its env config with the -// kubeconfig file for the config client. 
-// -func CreateKindClusterWithConfig(clusterName, image, configFilePath string) env.Func { - return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - k := kind.NewCluster(clusterName) - kubecfg, err := k.CreateWithConfig(image, configFilePath) - if err != nil { - return ctx, err - } - - // update envconfig with kubeconfig - cfg.WithKubeconfigFile(kubecfg) - - // stall, wait for pods initializations - if err := waitForControlPlane(cfg.Client()); err != nil { - return ctx, err - } - - // store entire cluster value in ctx for future access using the cluster name - return context.WithValue(ctx, kindContextKey(clusterName), k), nil - } +// Deprecated: This handler has been deprecated in favor of CreateCluster which can now accept +// support.ClusterProvider type as input in order to setup the cluster using right providers +func CreateKindCluster(clusterName string) env.Func { + return CreateCluster(kind.NewProvider(), clusterName) } -func waitForControlPlane(client klient.Client) error { - r, err := resources.New(client.RESTConfig()) - if err != nil { - return err - } - selector, err := metav1.LabelSelectorAsSelector( - &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, - }, - }, - ) - if err != nil { - return err - } - // a kind cluster with one control-plane node will have 4 pods running the core apiserver components - err = wait.For(conditions.New(r).ResourceListN(&v1.PodList{}, 4, resources.WithLabelSelector(selector.String()))) - if err != nil { - return err - } - selector, err = metav1.LabelSelectorAsSelector( - &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, - }, - }, - ) - if err != nil { - return err - } - // a kind cluster with one control-plane node will have 4 k8s-app pods running networking components - err = wait.For(conditions.New(r).ResourceListN(&v1.PodList{}, 4, resources.WithLabelSelector(selector.String()))) - if err != nil { - return err - } - return nil +// Deprecated: This handler has been deprecated in favor of CreateClusterWithConfig which can now accept +// support.ClusterProvider type as input in order to setup the cluster using right providers +func CreateKindClusterWithConfig(clusterName, image, configFilePath string) env.Func { + return CreateClusterWithConfig(kind.NewProvider(), clusterName, configFilePath, kind.WithImage(image)) } -// DestroyKindCluster returns an EnvFunc that -// retrieves a previously saved kind Cluster in the context (using the name), then deletes it. -// -// NOTE: this should be used in a Environment.Finish step. 
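The kind-specific helpers above are now thin deprecation shims over the generic provider functions added in provider_funcs.go further below. A hedged migration sketch, assuming the usual Environment.Setup/Finish registration methods; the cluster name, node image, config file, application image, and log directory are placeholders.

package example

import (
	"sigs.k8s.io/e2e-framework/pkg/env"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
	"sigs.k8s.io/e2e-framework/support/kind"
)

// configureCluster wires the generic provider-based helpers in place of the
// deprecated kind-specific ones.
func configureCluster(testenv env.Environment) string {
	name := envconf.RandomName("e2e", 16)
	testenv.Setup(
		// previously: envfuncs.CreateKindClusterWithConfig(name, "kindest/node:v1.27.3", "kind-config.yaml")
		envfuncs.CreateClusterWithConfig(kind.NewProvider(), name, "kind-config.yaml", kind.WithImage("kindest/node:v1.27.3")),
		envfuncs.LoadImageToCluster(name, "example.com/app:dev"),
	)
	testenv.Finish(
		envfuncs.ExportClusterLogs(name, "./logs"),
		envfuncs.DestroyCluster(name),
	)
	return name
}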
-// +// Deprecated: This handler has been deprecated in favor of DestroyCluster func DestroyKindCluster(name string) env.Func { - return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(kindContextKey(name)) - if clusterVal == nil { - return ctx, fmt.Errorf("destroy kind cluster func: context cluster is nil") - } - - cluster, ok := clusterVal.(*kind.Cluster) - if !ok { - return ctx, fmt.Errorf("destroy kind cluster func: unexpected type for cluster value") - } - - if err := cluster.Destroy(); err != nil { - return ctx, fmt.Errorf("destroy kind cluster: %w", err) - } - - return ctx, nil - } + return DestroyCluster(name) } -// LoadDockerImageToCluster returns an EnvFunc that -// retrieves a previously saved kind Cluster in the context (using the name), and then loads a docker image -// from the host into the cluster. -// -func LoadDockerImageToCluster(name, image string) env.Func { - return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(kindContextKey(name)) - if clusterVal == nil { - return ctx, fmt.Errorf("load docker image func: context cluster is nil") - } - - cluster, ok := clusterVal.(*kind.Cluster) - if !ok { - return ctx, fmt.Errorf("load docker image func: unexpected type for cluster value") - } - - if err := cluster.LoadDockerImage(image); err != nil { - return ctx, fmt.Errorf("load docker image: %w", err) - } - - return ctx, nil - } -} - -// LoadImageArchiveToCluster returns an EnvFunc that -// retrieves a previously saved kind Cluster in the context (using the name), and then loads a docker image TAR archive -// from the host into the cluster. -// -func LoadImageArchiveToCluster(name, imageArchive string) env.Func { - return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(kindContextKey(name)) - if clusterVal == nil { - return ctx, fmt.Errorf("load image archive func: context cluster is nil") - } - - cluster, ok := clusterVal.(*kind.Cluster) - if !ok { - return ctx, fmt.Errorf("load image archive func: unexpected type for cluster value") - } - - if err := cluster.LoadImageArchive(imageArchive); err != nil { - return ctx, fmt.Errorf("load image archive: %w", err) - } - - return ctx, nil - } +// Deprecated: This handler has been deprecated in favor of ExportClusterLogs +func ExportKindClusterLogs(name, dest string) env.Func { + return ExportClusterLogs(name, dest) } diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/provider_funcs.go b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/provider_funcs.go new file mode 100644 index 000000000..6fd612f7c --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/provider_funcs.go @@ -0,0 +1,189 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envfuncs + +import ( + "context" + "fmt" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/support" +) + +type clusterNameContextKey string + +var LoadDockerImageToCluster = LoadImageToCluster + +// GetClusterFromContext helps extract the E2EClusterProvider object from the context. +// This can be used to setup and run tests of multi cluster e2e Prioviders. +func GetClusterFromContext(ctx context.Context, clusterName string) (support.E2EClusterProvider, bool) { + c := ctx.Value(clusterNameContextKey(clusterName)) + if c == nil { + return nil, false + } + cluster, ok := c.(support.E2EClusterProvider) + return cluster, ok +} + +// CreateCluster returns an env.Func that is used to +// create an E2E provider cluster that is then injected in the context +// using the name as a key. +// +// NOTE: the returned function will update its env config with the +// kubeconfig file for the config client. +func CreateCluster(p support.E2EClusterProvider, clusterName string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + k := p.SetDefaults().WithName(clusterName) + kubecfg, err := k.Create(ctx) + if err != nil { + return ctx, err + } + + // update envconfig with kubeconfig + cfg.WithKubeconfigFile(kubecfg) + + // stall, wait for pods initializations + if err := k.WaitForControlPlane(ctx, cfg.Client()); err != nil { + return ctx, err + } + + // store entire cluster value in ctx for future access using the cluster name + return context.WithValue(ctx, clusterNameContextKey(clusterName), k), nil + } +} + +// CreateClusterWithConfig returns an env.Func that is used to +// create a e2e provider cluster that is then injected in the context +// using the name as a key. +// +// NOTE: the returned function will update its env config with the +// kubeconfig file for the config client. +func CreateClusterWithConfig(p support.E2EClusterProvider, clusterName, configFilePath string, opts ...support.ClusterOpts) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + k := p.SetDefaults().WithName(clusterName).WithOpts(opts...) + kubecfg, err := k.CreateWithConfig(ctx, configFilePath) + if err != nil { + return ctx, err + } + + cfg.Client().RESTConfig() + // update envconfig with kubeconfig + cfg.WithKubeconfigFile(kubecfg) + + // stall, wait for pods initializations + if err := k.WaitForControlPlane(ctx, cfg.Client()); err != nil { + return ctx, err + } + + // store entire cluster value in ctx for future access using the cluster name + return context.WithValue(ctx, clusterNameContextKey(clusterName), k), nil + } +} + +// DestroyCluster returns an EnvFunc that +// retrieves a previously saved e2e provider Cluster in the context (using the name), then deletes it. +// +// NOTE: this should be used in a Environment.Finish step. 
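
For orientation, a minimal sketch (not part of this patch) of how a test suite might consume these new provider-based helpers, assuming the framework's usual env.New / Setup / Finish / Run flow; the cluster name, kind config path, node image, application image, and log directory below are placeholders:

    package e2e

    import (
    	"os"
    	"testing"

    	"sigs.k8s.io/e2e-framework/pkg/env"
    	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
    	"sigs.k8s.io/e2e-framework/support/kind"
    )

    var testEnv env.Environment

    func TestMain(m *testing.M) {
    	testEnv = env.New()
    	clusterName := "e2e-test" // placeholder cluster name

    	testEnv.Setup(
    		// Replaces the deprecated CreateKindClusterWithConfig wrapper shown above.
    		envfuncs.CreateClusterWithConfig(kind.NewProvider(), clusterName, "kind-config.yaml", kind.WithImage("kindest/node:v1.26.0")),
    		envfuncs.LoadImageToCluster(clusterName, "example.com/app:dev"),
    	)
    	testEnv.Finish(
    		envfuncs.ExportClusterLogs(clusterName, "/tmp/e2e-logs"),
    		envfuncs.DestroyCluster(clusterName),
    	)
    	os.Exit(testEnv.Run(m))
    }
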
+func DestroyCluster(name string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + clusterVal := ctx.Value(clusterNameContextKey(name)) + if clusterVal == nil { + return ctx, fmt.Errorf("destroy e2e provider cluster func: context cluster is nil") + } + + cluster, ok := clusterVal.(support.E2EClusterProvider) + if !ok { + return ctx, fmt.Errorf("destroy e2e provider cluster func: unexpected type for cluster value") + } + + if err := cluster.Destroy(ctx); err != nil { + return ctx, fmt.Errorf("destroy e2e provider cluster: %w", err) + } + + return ctx, nil + } +} + +// LoadImageToCluster returns an EnvFunc that +// retrieves a previously saved e2e provider Cluster in the context (using the name), and then loads a container image +// from the host into the cluster. +func LoadImageToCluster(name, image string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + clusterVal := ctx.Value(clusterNameContextKey(name)) + if clusterVal == nil { + return ctx, fmt.Errorf("load image func: context cluster is nil") + } + + cluster, ok := clusterVal.(support.E2EClusterProviderWithImageLoader) + if !ok { + return ctx, fmt.Errorf("load image archive func: cluster provider does not support LoadImage helper") + } + + if err := cluster.LoadImage(ctx, image); err != nil { + return ctx, fmt.Errorf("load image: %w", err) + } + + return ctx, nil + } +} + +// LoadImageArchiveToCluster returns an EnvFunc that +// retrieves a previously saved e2e provider Cluster in the context (using the name), and then loads a container image TAR archive +// from the host into the cluster. +func LoadImageArchiveToCluster(name, imageArchive string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + clusterVal := ctx.Value(clusterNameContextKey(name)) + if clusterVal == nil { + return ctx, fmt.Errorf("load image archive func: context cluster is nil") + } + + cluster, ok := clusterVal.(support.E2EClusterProviderWithImageLoader) + if !ok { + return ctx, fmt.Errorf("load image archive func: cluster provider does not support LoadImageArchive helper") + } + + if err := cluster.LoadImageArchive(ctx, imageArchive); err != nil { + return ctx, fmt.Errorf("load image archive: %w", err) + } + + return ctx, nil + } +} + +// ExportClusterLogs returns an EnvFunc that +// retrieves a previously saved e2e provider Cluster in the context (using the name), and then export cluster logs +// in the provided destination. +func ExportClusterLogs(name, dest string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + clusterVal := ctx.Value(clusterNameContextKey(name)) + if clusterVal == nil { + return ctx, fmt.Errorf("export e2e provider cluster logs: context cluster is nil") + } + + cluster, ok := clusterVal.(support.E2EClusterProvider) + if !ok { + return ctx, fmt.Errorf("export e2e provider cluster logs: unexpected type for cluster value") + } + + if err := cluster.ExportLogs(ctx, dest); err != nil { + return ctx, fmt.Errorf("load image archive: %w", err) + } + + return ctx, nil + } +} diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/resource_funcs.go b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/resource_funcs.go new file mode 100644 index 000000000..54a5f565a --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/envfuncs/resource_funcs.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envfuncs + +import ( + "context" + + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" +) + +// SetupCRDs is provided as a helper env.Func handler that can be used to setup the CRDs that are required +// to process your controller code for testing. For additional control on resource creation handling, please +// use the decoder.ApplyWithManifestDir directly with suitable arguments to customize the behavior +func SetupCRDs(crdPath, pattern string) env.Func { + return func(ctx context.Context, c *envconf.Config) (context.Context, error) { + r, err := resources.New(c.Client().RESTConfig()) + if err != nil { + return ctx, err + } + return ctx, decoder.ApplyWithManifestDir(ctx, r, crdPath, pattern, []resources.CreateOption{}) + } +} + +// TeardownCRDs is provided as a handler function that can be hooked into your test's teardown sequence to +// make sure that you can cleanup the CRDs that were setup as part of the SetupCRDs hook +func TeardownCRDs(crdPath, pattern string) env.Func { + return func(ctx context.Context, c *envconf.Config) (context.Context, error) { + r, err := resources.New(c.Client().RESTConfig()) + if err != nil { + return ctx, err + } + return ctx, decoder.DeleteWithManifestDir(ctx, r, crdPath, pattern, []resources.DeleteOption{}) + } +} diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/features/builder.go b/vendor/sigs.k8s.io/e2e-framework/pkg/features/builder.go index 47f56cfb3..7a79b604b 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/features/builder.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/features/builder.go @@ -29,12 +29,16 @@ type FeatureBuilder struct { } func New(name string) *FeatureBuilder { - return &FeatureBuilder{feat: newDefaultFeature(name)} + return &FeatureBuilder{feat: newDefaultFeature(name, "")} +} + +func NewWithDescription(name, description string) *FeatureBuilder { + return &FeatureBuilder{feat: newDefaultFeature(name, description)} } // WithLabel adds a test label key/value pair func (b *FeatureBuilder) WithLabel(key, value string) *FeatureBuilder { - b.feat.labels[key] = value + b.feat.labels[key] = append(b.feat.labels[key], value) return b } @@ -44,22 +48,40 @@ func (b *FeatureBuilder) WithStep(name string, level Level, fn Func) *FeatureBui return b } +func (b *FeatureBuilder) WithStepDescription(name, description string, level Level, fn Func) *FeatureBuilder { + b.feat.steps = append(b.feat.steps, newStepWithDescription(name, description, level, fn)) + return b +} + // Setup adds a new setup step that will be applied prior to feature test. 
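
Continuing the TestMain sketch shown earlier, the CRD helpers above slot into the same Setup/Finish lists; the manifest directory and glob pattern here are placeholders:

    testEnv.Setup(
    	envfuncs.SetupCRDs("./testdata/crds", "*.yaml"), // apply every matching CRD manifest before the tests
    )
    testEnv.Finish(
    	envfuncs.TeardownCRDs("./testdata/crds", "*.yaml"), // delete the same manifests after the run
    )
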
func (b *FeatureBuilder) Setup(fn Func) *FeatureBuilder { - b.feat.steps = append(b.feat.steps, newStep(fmt.Sprintf("%s-setup", b.feat.name), types.LevelSetup, fn)) - return b + return b.WithSetup(fmt.Sprintf("%s-setup", b.feat.name), fn) +} + +// WithSetup adds a new setup step with a pre-defined setup name instead of automating +// the setup name generation. This can make tests more readable. +func (b *FeatureBuilder) WithSetup(name string, fn Func) *FeatureBuilder { + return b.WithStep(name, types.LevelSetup, fn) } // Teardown adds a new teardown step that will be applied after feature test. func (b *FeatureBuilder) Teardown(fn Func) *FeatureBuilder { - b.feat.steps = append(b.feat.steps, newStep(fmt.Sprintf("%s-teardown", b.feat.name), types.LevelTeardown, fn)) - return b + return b.WithTeardown(fmt.Sprintf("%s-teardown", b.feat.name), fn) +} + +// WithTeardown adds a new teardown step with a pre-defined name instead of an +// auto-generated one +func (b *FeatureBuilder) WithTeardown(name string, fn Func) *FeatureBuilder { + return b.WithStep(name, types.LevelTeardown, fn) } // Assess adds an assessment step to the feature test. func (b *FeatureBuilder) Assess(desc string, fn Func) *FeatureBuilder { - b.feat.steps = append(b.feat.steps, newStep(desc, types.LevelAssess, fn)) - return b + return b.WithStep(desc, types.LevelAssess, fn) +} + +func (b *FeatureBuilder) AssessWithDescription(name, description string, fn Func) *FeatureBuilder { + return b.WithStepDescription(name, description, types.LevelAssess, fn) } // Feature returns a feature configured by builder. diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/features/feature.go b/vendor/sigs.k8s.io/e2e-framework/pkg/features/feature.go index 99ba1032c..df46f9dd1 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/features/feature.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/features/feature.go @@ -31,13 +31,14 @@ type ( ) type defaultFeature struct { - name string - labels types.Labels - steps []types.Step + name string + description string + labels types.Labels + steps []types.Step } -func newDefaultFeature(name string) *defaultFeature { - return &defaultFeature{name: name, labels: make(types.Labels)} +func newDefaultFeature(name, description string) *defaultFeature { + return &defaultFeature{name: name, description: description, labels: make(types.Labels), steps: make([]types.Step, 0)} } func (f *defaultFeature) Name() string { @@ -52,17 +53,27 @@ func (f *defaultFeature) Steps() []types.Step { return f.steps } +func (f *defaultFeature) Description() string { + return f.description +} + type testStep struct { - name string - level Level - fn Func + name string + description string + level Level + fn Func } func newStep(name string, level Level, fn Func) *testStep { + return newStepWithDescription(name, "", level, fn) +} + +func newStepWithDescription(name, description string, level Level, fn Func) *testStep { return &testStep{ - name: name, - level: level, - fn: fn, + name: name, + description: description, + level: level, + fn: fn, } } @@ -78,6 +89,10 @@ func (s *testStep) Func() Func { return s.fn } +func (s *testStep) Description() string { + return s.description +} + func GetStepsByLevel(steps []types.Step, l types.Level) []types.Step { if steps == nil { return nil diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/features/table.go b/vendor/sigs.k8s.io/e2e-framework/pkg/features/table.go index d2afed90c..520e89a6b 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/features/table.go +++ 
b/vendor/sigs.k8s.io/e2e-framework/pkg/features/table.go @@ -23,26 +23,31 @@ import ( // Table provides a structure for table-driven tests. // Each entry in the table represents an executable assessment. type Table []struct { - Name string - Assessment Func + Name string + Description string + Assessment Func } // Build converts the defined test steps in the table // into a FeatureBuilder which can be used to add additional attributes // to the feature before it's exercised. Build takes an optional feature name // if omitted will be generated. -func (table Table) Build(featureName ...string) *FeatureBuilder { +func (table Table) Build(args ...string) *FeatureBuilder { var name string - if len(featureName) > 0 { - name = featureName[0] + var description string + if len(args) > 0 { + name = args[0] } - f := New(name) + if len(args) > 1 { + description = args[1] + } + f := NewWithDescription(name, description) for i, test := range table { if test.Name == "" { test.Name = fmt.Sprintf("Assessment-%d", i) } if test.Assessment != nil { - f.Assess(test.Name, test.Assessment) + f.AssessWithDescription(test.Name, test.Description, test.Assessment) } } return f diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/flags/flags.go b/vendor/sigs.k8s.io/e2e-framework/pkg/flags/flags.go index a83e474ae..763dd2d85 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/flags/flags.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/flags/flags.go @@ -26,15 +26,19 @@ import ( ) const ( - flagNamespaceName = "namespace" - flagKubecofigName = "kubeconfig" - flagFeatureName = "feature" - flagAssessName = "assess" - flagLabelsName = "labels" - flagSkipLabelName = "skip-labels" - flagSkipFeatureName = "skip-features" - flagSkipAssessmentName = "skip-assessment" - flagParallelTestsName = "parallel" + flagNamespaceName = "namespace" + flagKubecofigName = "kubeconfig" + flagFeatureName = "feature" + flagAssessName = "assess" + flagLabelsName = "labels" + flagSkipLabelName = "skip-labels" + flagSkipFeatureName = "skip-features" + flagSkipAssessmentName = "skip-assessment" + flagParallelTestsName = "parallel" + flagDryRunName = "dry-run" + flagFailFast = "fail-fast" + flagDisableGracefulTeardown = "disable-graceful-teardown" + flagContext = "context" ) // Supported flag definitions @@ -75,19 +79,39 @@ var ( Name: flagParallelTestsName, Usage: "Run test features in parallel", } + dryRunFlag = flag.Flag{ + Name: flagDryRunName, + Usage: "Run Test suite in dry-run mode. This will list the tests to be executed without actually running them", + } + failFastFlag = flag.Flag{ + Name: flagFailFast, + Usage: "Fail immediately and stop running untested code", + } + disableGracefulTeardownFlag = flag.Flag{ + Name: flagDisableGracefulTeardown, + Usage: "Ignore panic recovery while running tests. 
This will prevent test finish steps from getting executed on panic", + } + contextFlag = flag.Flag{ + Name: flagContext, + Usage: "The name of the kubeconfig context to use", + } ) // EnvFlags surfaces all resolved flag values for the testing framework type EnvFlags struct { - feature string - assess string - labels LabelsMap - kubeconfig string - namespace string - skiplabels LabelsMap - skipFeatures string - skipAssessments string - parallelTests bool + feature string + assess string + labels LabelsMap + kubeconfig string + namespace string + skiplabels LabelsMap + skipFeatures string + skipAssessments string + parallelTests bool + dryRun bool + failFast bool + disableGracefulTeardown bool + kubeContext string } // Feature returns value for `-feature` flag @@ -110,14 +134,21 @@ func (f *EnvFlags) Namespace() string { return f.namespace } +// SkipFeatures is used to get a RegExp pattern that can be used +// to skip test features from getting executed func (f *EnvFlags) SkipFeatures() string { return f.skipFeatures } +// SkipAssessment is used to track the RegExp pattern that can be +// used to skip certain assessments of the current feature being +// executed func (f *EnvFlags) SkipAssessment() string { return f.skipAssessments } +// SkipLabels is used to define a series of labels that can be used +// to skip test cases during execution func (f *EnvFlags) SkipLabels() LabelsMap { return f.skiplabels } @@ -127,26 +158,54 @@ func (f *EnvFlags) Kubeconfig() string { return f.kubeconfig } +// Parallel is used to indicate if the test features should be run in parallel +// under a go-routine func (f *EnvFlags) Parallel() bool { return f.parallelTests } +func (f *EnvFlags) DryRun() bool { + return f.dryRun +} + +// FailFast is used to indicate if the failure of an assessment should continue +// assessing the rest of the features or skip it and continue to the next one. +// This is set to false by default. +func (f *EnvFlags) FailFast() bool { + return f.failFast +} + +// DisableGracefulTeardown is used to indicate that the panic handlers should not be registered while +// starting the test execution. This will prevent the test Finish steps from getting executed +func (f *EnvFlags) DisableGracefulTeardown() bool { + return f.disableGracefulTeardown +} + // Parse parses defined CLI args os.Args[1:] func Parse() (*EnvFlags, error) { return ParseArgs(os.Args[1:]) } +// Context returns an optional kubeconfig context to use +func (f *EnvFlags) KubeContext() string { + return f.kubeContext +} + // ParseArgs parses the specified args from global flag.CommandLine // and returns a set of environment flag values. 
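
As an illustration of the new flags, ParseArgs can be exercised directly; the label values and kubeconfig context name are invented for the example:

    package main

    import (
    	"fmt"
    	"log"

    	"sigs.k8s.io/e2e-framework/pkg/flags"
    )

    func main() {
    	f, err := flags.ParseArgs([]string{
    		"--labels", "type=smoke,type=regression", // repeated values for a key now accumulate
    		"--fail-fast",
    		"--context", "kind-e2e", // placeholder kubeconfig context
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(f.FailFast(), f.DryRun(), f.KubeContext())
    }
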
func ParseArgs(args []string) (*EnvFlags, error) { var ( - feature string - assess string - namespace string - kubeconfig string - skipFeature string - skipAssessment string - parallelTests bool + feature string + assess string + namespace string + kubeconfig string + skipFeature string + skipAssessment string + parallelTests bool + dryRun bool + failFast bool + disableGracefulTeardown bool + kubeContext string ) labels := make(LabelsMap) @@ -188,6 +247,22 @@ func ParseArgs(args []string) (*EnvFlags, error) { flag.BoolVar(¶llelTests, parallelTestsFlag.Name, false, parallelTestsFlag.Usage) } + if flag.Lookup(dryRunFlag.Name) == nil { + flag.BoolVar(&dryRun, dryRunFlag.Name, false, dryRunFlag.Usage) + } + + if flag.Lookup(failFastFlag.Name) == nil { + flag.BoolVar(&failFast, failFastFlag.Name, false, failFastFlag.Usage) + } + + if flag.Lookup(disableGracefulTeardownFlag.Name) == nil { + flag.BoolVar(&disableGracefulTeardown, disableGracefulTeardownFlag.Name, false, disableGracefulTeardownFlag.Usage) + } + + if flag.Lookup(contextFlag.Name) == nil { + flag.StringVar(&kubeContext, contextFlag.Name, contextFlag.DefValue, contextFlag.Usage) + } + // Enable klog/v2 flag integration klog.InitFlags(nil) @@ -195,23 +270,37 @@ func ParseArgs(args []string) (*EnvFlags, error) { return nil, fmt.Errorf("flags parsing: %w", err) } + // Hook into the default test.list of the `go test` and integrate that with the `--dry-run` behavior. Treat them the same way + if !dryRun && flag.Lookup("test.list") != nil && flag.Lookup("test.list").Value.String() == "true" { + klog.V(2).Info("Enabling dry-run mode as the tests were invoked in list mode") + dryRun = true + } + + if failFast && parallelTests { + panic(fmt.Errorf("--fail-fast and --parallel are mutually exclusive options")) + } + return &EnvFlags{ - feature: feature, - assess: assess, - labels: labels, - namespace: namespace, - kubeconfig: kubeconfig, - skiplabels: skipLabels, - skipFeatures: skipFeature, - skipAssessments: skipAssessment, - parallelTests: parallelTests, + feature: feature, + assess: assess, + labels: labels, + namespace: namespace, + kubeconfig: kubeconfig, + skiplabels: skipLabels, + skipFeatures: skipFeature, + skipAssessments: skipAssessment, + parallelTests: parallelTests, + dryRun: dryRun, + failFast: failFast, + disableGracefulTeardown: disableGracefulTeardown, + kubeContext: kubeContext, }, nil } -type LabelsMap map[string]string +type LabelsMap map[string][]string func (m LabelsMap) String() string { - i := map[string]string(m) + i := map[string][]string(m) return fmt.Sprint(i) } @@ -223,8 +312,19 @@ func (m LabelsMap) Set(val string) error { if len(kv) != 2 { return fmt.Errorf("label format error: %s", label) } - m[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1]) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + m[k] = append(m[k], v) } return nil } + +func (m LabelsMap) Contains(key, val string) bool { + for _, v := range m[key] { + if val == v { + return true + } + } + return false +} diff --git a/vendor/sigs.k8s.io/e2e-framework/pkg/internal/types/types.go b/vendor/sigs.k8s.io/e2e-framework/pkg/internal/types/types.go index 4a2073bc4..ed4fef4bd 100644 --- a/vendor/sigs.k8s.io/e2e-framework/pkg/internal/types/types.go +++ b/vendor/sigs.k8s.io/e2e-framework/pkg/internal/types/types.go @@ -21,6 +21,7 @@ import ( "testing" "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/flags" ) // EnvFunc represents a user-defined operation that @@ -67,12 +68,12 @@ type Environment interface { // Test 
executes a test feature defined in a TestXXX function // This method surfaces context for further updates. - Test(*testing.T, ...Feature) + Test(*testing.T, ...Feature) context.Context // TestInParallel executes a series of test features defined in a // TestXXX function in parallel. This works the same way Test method // does with the caveat that the features will all be run in parallel - TestInParallel(*testing.T, ...Feature) + TestInParallel(*testing.T, ...Feature) context.Context // AfterEachTest registers environment funcs that are executed // after each Env.Test(...). @@ -86,7 +87,7 @@ type Environment interface { Run(*testing.M) int } -type Labels map[string]string +type Labels = flags.LabelsMap type Feature interface { // Name is a descriptive text for the feature @@ -118,3 +119,19 @@ type Step interface { // Func is the operation for the step Func() StepFunc } + +type DescribableStep interface { + Step + // Description is the Readable test description indicating the purpose behind the test that + // can add more context to the test under question + Description() string +} + +type DescribableFeature interface { + Feature + + // Description is used to provide a readable context for the test feature. This can be used + // to provide more context for the test being performed and the assessment under each of the + // feature. + Description() string +} diff --git a/vendor/sigs.k8s.io/e2e-framework/support/kind/kind.go b/vendor/sigs.k8s.io/e2e-framework/support/kind/kind.go index 65ea251c5..abba976df 100644 --- a/vendor/sigs.k8s.io/e2e-framework/support/kind/kind.go +++ b/vendor/sigs.k8s.io/e2e-framework/support/kind/kind.go @@ -18,52 +18,109 @@ package kind import ( "bytes" + "context" "fmt" "io" - "io/ioutil" "os" "strings" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" log "k8s.io/klog/v2" - - "github.com/vladimirvivien/gexe" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/conf" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/support" + "sigs.k8s.io/e2e-framework/support/utils" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var kindVersion = "v0.11.0" +var kindVersion = "v0.17.0" type Cluster struct { + path string name string - e *gexe.Echo kubecfgFile string version string + image string + rc *rest.Config } +// Enforce Type check always to avoid future breaks +var _ support.E2EClusterProvider = &Cluster{} + func NewCluster(name string) *Cluster { - return &Cluster{name: name, e: gexe.New()} + return &Cluster{name: name} +} + +func NewProvider() support.E2EClusterProvider { + return &Cluster{} +} + +func WithImage(image string) support.ClusterOpts { + return func(c support.E2EClusterProvider) { + k, ok := c.(*Cluster) + if ok { + k.image = image + } + } +} + +func WithPath(path string) support.ClusterOpts { + return func(c support.E2EClusterProvider) { + k, ok := c.(*Cluster) + if ok { + k.path = path + } + } +} + +func (k *Cluster) SetDefaults() support.E2EClusterProvider { + if k.path == "" { + k.path = "kind" + } + return k +} + +func (k *Cluster) WithName(name string) support.E2EClusterProvider { + k.name = name + return k +} + +func (k *Cluster) WithPath(path string) support.E2EClusterProvider { + k.path = path + return k } -// WithVersion set kind version -func (k *Cluster) WithVersion(ver string) *Cluster { +func (k *Cluster) WithVersion(ver string) support.E2EClusterProvider { k.version = ver return k } +func (k 
*Cluster) WithOpts(opts ...support.ClusterOpts) support.E2EClusterProvider { + for _, o := range opts { + o(k) + } + return k +} + func (k *Cluster) getKubeconfig() (string, error) { kubecfg := fmt.Sprintf("%s-kubecfg", k.name) - p := k.e.StartProc(fmt.Sprintf(`kind get kubeconfig --name %s`, k.name)) + p := utils.RunCommand(fmt.Sprintf(`%s get kubeconfig --name %s`, k.path, k.name)) if p.Err() != nil { return "", fmt.Errorf("kind get kubeconfig: %w", p.Err()) } + var stdout bytes.Buffer - if _, err := stdout.ReadFrom(p.StdOut()); err != nil { + if _, err := stdout.ReadFrom(p.Out()); err != nil { return "", fmt.Errorf("kind kubeconfig stdout bytes: %w", err) } - if p.Wait().Err() != nil { - return "", fmt.Errorf("kind get kubeconfig: %s: %w", p.Result(), p.Err()) - } - file, err := ioutil.TempFile("", fmt.Sprintf("kind-cluser-%s", kubecfg)) + file, err := os.CreateTemp("", fmt.Sprintf("kind-cluser-%s", kubecfg)) if err != nil { return "", fmt.Errorf("kind kubeconfig file: %w", err) } @@ -79,7 +136,7 @@ func (k *Cluster) getKubeconfig() (string, error) { } func (k *Cluster) clusterExists(name string) (string, bool) { - clusters := k.e.Run("kind get clusters") + clusters := utils.FetchCommandOutput(fmt.Sprintf("%s get clusters", k.path)) for _, c := range strings.Split(clusters, "\n") { if c == name { return clusters, true @@ -88,13 +145,17 @@ func (k *Cluster) clusterExists(name string) (string, bool) { return clusters, false } -func (k *Cluster) CreateWithConfig(imageName, kindConfigFile string) (string, error) { - return k.Create("--image", imageName, "--config", kindConfigFile) +func (k *Cluster) CreateWithConfig(ctx context.Context, kindConfigFile string) (string, error) { + args := []string{"--config", kindConfigFile} + if k.image != "" { + args = append(args, "--image", k.image) + } + return k.Create(ctx, args...) } -func (k *Cluster) Create(args ...string) (string, error) { +func (k *Cluster) Create(ctx context.Context, args ...string) (string, error) { log.V(4).Info("Creating kind cluster ", k.name) - if err := k.findOrInstallKind(k.e); err != nil { + if err := k.findOrInstallKind(); err != nil { return "", err } @@ -103,120 +164,140 @@ func (k *Cluster) Create(args ...string) (string, error) { return k.getKubeconfig() } - command := fmt.Sprintf(`kind create cluster --name %s`, k.name) + command := fmt.Sprintf(`%s create cluster --name %s`, k.path, k.name) if len(args) > 0 { command = fmt.Sprintf("%s %s", command, strings.Join(args, " ")) } log.V(4).Info("Launching:", command) - p := k.e.RunProc(command) + p := utils.RunCommand(command) if p.Err() != nil { - return "", fmt.Errorf("failed to create kind cluster: %s : %s", p.Err(), p.Result()) + // Print the output data as well so that it can be useful to debug cluster bringup failures + var data []byte + b := bytes.NewBuffer(data) + _, err := io.Copy(b, p.Out()) + if err != nil { + log.ErrorS(err, "failed to read data from the kind create process output due to an error") + } + return "", fmt.Errorf("failed to create kind cluster: %s : %s: %s", p.Err(), p.Result(), b.String()) } - clusters, ok := k.clusterExists(k.name) if !ok { return "", fmt.Errorf("kind Cluster.Create: cluster %v still not in 'cluster list' after creation: %v", k.name, clusters) } log.V(4).Info("kind clusters available: ", clusters) - // Grab kubeconfig file for cluster. 
- return k.getKubeconfig() + kConfig, err := k.getKubeconfig() + if err != nil { + return "", err + } + return kConfig, k.initKubernetesAccessClients() +} + +func (k *Cluster) initKubernetesAccessClients() error { + cfg, err := conf.New(k.kubecfgFile) + if err != nil { + return err + } + k.rc = cfg + return nil } -// GetKubeconfig returns the path of the kubeconfig file -// associated with this kind cluster func (k *Cluster) GetKubeconfig() string { return k.kubecfgFile } -func (k *Cluster) GetKubeCtlContext() string { +func (k *Cluster) GetKubectlContext() string { return fmt.Sprintf("kind-%s", k.name) } -func (k *Cluster) Destroy() error { - log.V(4).Info("Destroying kind cluster ", k.name) - if err := k.findOrInstallKind(k.e); err != nil { +// ExportLogs export all cluster logs to the provided path. +func (k *Cluster) ExportLogs(ctx context.Context, dest string) error { + log.V(4).Info("Exporting kind cluster logs to ", dest) + if err := k.findOrInstallKind(); err != nil { return err } - p := k.e.RunProc(fmt.Sprintf(`kind delete cluster --name %s`, k.name)) + p := utils.RunCommand(fmt.Sprintf(`%s export logs %s --name %s`, k.path, dest, k.name)) if p.Err() != nil { - return fmt.Errorf("kind: delete cluster failed: %s: %s", p.Err(), p.Result()) - } - - log.V(4).Info("Removing kubeconfig file ", k.kubecfgFile) - if err := os.RemoveAll(k.kubecfgFile); err != nil { - return fmt.Errorf("kind: remove kubefconfig failed: %w", err) + return fmt.Errorf("kind: export cluster %v logs failed: %s: %s", k.name, p.Err(), p.Result()) } return nil } -func (k *Cluster) findOrInstallKind(e *gexe.Echo) error { - if e.Prog().Avail("kind") == "" { - log.V(4).Infof(`kind not found, installing with GO111MODULE="on" go get sigs.k8s.io/kind@%s`, kindVersion) - if err := k.installKind(e); err != nil { - return err - } - } - return nil -} - -func (k *Cluster) installKind(e *gexe.Echo) error { - if k.version != "" { - kindVersion = k.version +func (k *Cluster) Destroy(ctx context.Context) error { + log.V(4).Info("Destroying kind cluster ", k.name) + if err := k.findOrInstallKind(); err != nil { + return err } - log.V(4).Infof("Installing: go get sigs.k8s.io/kind@%s", kindVersion) - p := e.SetEnv("GO111MODULE", "on").RunProc(fmt.Sprintf("go get sigs.k8s.io/kind@%s", kindVersion)) + p := utils.RunCommand(fmt.Sprintf(`%s delete cluster --name %s`, k.path, k.name)) if p.Err() != nil { - return fmt.Errorf("failed to install kind: %s", p.Err()) + return fmt.Errorf("kind: delete cluster %v failed: %s: %s", k.name, p.Err(), p.Result()) } - if !p.IsSuccess() || p.ExitCode() != 0 { - return fmt.Errorf("failed to install kind: %s", p.Result()) + log.V(4).Info("Removing kubeconfig file ", k.kubecfgFile) + if err := os.RemoveAll(k.kubecfgFile); err != nil { + return fmt.Errorf("kind: remove kubefconfig %v failed: %w", k.kubecfgFile, err) } - // PATH may already be set to include $GOPATH/bin so we don't need to. 
- if kindPath := e.Prog().Avail("kind"); kindPath != "" { - log.V(4).Info("Installed kind at", kindPath) - return nil - } + return nil +} - p = e.RunProc("ls $GOPATH/bin") - if p.Err() != nil { - return fmt.Errorf("failed to install kind: %s", p.Err()) +func (k *Cluster) findOrInstallKind() error { + if k.version != "" { + kindVersion = k.version } - - p = e.RunProc("echo $PATH:$GOPATH/bin") - if p.Err() != nil { - return fmt.Errorf("failed to install kind: %s", p.Err()) + path, err := utils.FindOrInstallGoBasedProvider(k.path, "kind", "sigs.k8s.io/kind", kindVersion) + if path != "" { + k.path = path } + return err +} - log.V(4).Info(`Setting path to include $GOPATH/bin:`, p.Result()) - e.SetEnv("PATH", p.Result()) - - if kindPath := e.Prog().Avail("kind"); kindPath != "" { - log.V(4).Info("Installed kind at", kindPath) - return nil +func (k *Cluster) LoadImage(ctx context.Context, image string) error { + p := utils.RunCommand(fmt.Sprintf(`%s load docker-image --name %s %s`, k.path, k.name, image)) + if p.Err() != nil { + return fmt.Errorf("kind: load docker-image %v failed: %s: %s", image, p.Err(), p.Result()) } - return fmt.Errorf("kind not available even after installation") + return nil } -// LoadDockerImage loads a docker image from the host into the kind cluster -func (k *Cluster) LoadDockerImage(image string) error { - p := k.e.RunProc(fmt.Sprintf(`kind load docker-image --name %s %s`, k.name, image)) +func (k *Cluster) LoadImageArchive(ctx context.Context, imageArchive string) error { + p := utils.RunCommand(fmt.Sprintf(`%s load image-archive --name %s %s`, k.path, k.name, imageArchive)) if p.Err() != nil { - return fmt.Errorf("kind: load docker-image failed: %s: %s", p.Err(), p.Result()) + return fmt.Errorf("kind: load image-archive %v failed: %s: %s", imageArchive, p.Err(), p.Result()) } return nil } -// LoadImageArchive loads a docker image TAR archive from the host into the kind cluster -func (k *Cluster) LoadImageArchive(imageArchive string) error { - p := k.e.RunProc(fmt.Sprintf(`kind load image-archive --name %s %s`, k.name, imageArchive)) - if p.Err() != nil { - return fmt.Errorf("kind: load image-archive failed: %s: %s", p.Err(), p.Result()) +func (k *Cluster) WaitForControlPlane(ctx context.Context, client klient.Client) error { + r, err := resources.New(client.RESTConfig()) + if err != nil { + return err + } + for _, sl := range []metav1.LabelSelectorRequirement{ + {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, + {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, + } { + selector, err := metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + sl, + }, + }, + ) + if err != nil { + return err + } + err = wait.For(conditions.New(r).ResourceListN(&v1.PodList{}, len(sl.Values), resources.WithLabelSelector(selector.String()))) + if err != nil { + return err + } } return nil } + +func (k *Cluster) KubernetesRestConfig() *rest.Config { + return k.rc +} diff --git a/vendor/sigs.k8s.io/e2e-framework/support/types.go b/vendor/sigs.k8s.io/e2e-framework/support/types.go new file mode 100644 index 000000000..25fcae263 --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/support/types.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package support + +import ( + "context" + + "k8s.io/client-go/rest" + "sigs.k8s.io/e2e-framework/klient" +) + +type ClusterOpts func(c E2EClusterProvider) + +type E2EClusterProvider interface { + // WithName is used to configure the cluster Name that should be used while setting up the cluster. Might + // Not apply for all providers. + WithName(name string) E2EClusterProvider + + // WithVersion helps you override the default version used while using the cluster provider. + // This can be useful in providing a mechanism to the end users where they want to test their + // code against a certain specific version of k8s that is not the default one configured + // for the provider + WithVersion(version string) E2EClusterProvider + + // WithPath heps you customize the executable binary that is used to back the cluster provider. + // This is useful in cases where your binary is present in a non standard location output of the + // PATH variable and you want to use that instead of framework trying to install one on it's own. + WithPath(path string) E2EClusterProvider + + // WithOpts provides a way to customize the options that can be used while setting up the + // cluster using the providers such as kind or kwok or anything else. These helpers can be + // leveraged to setup arguments or configuration values that can be provided while performing + // the cluster bring up + WithOpts(opts ...ClusterOpts) E2EClusterProvider + + // Create Provides an interface to start the cluster creation workflow using the selected provider + Create(ctx context.Context, args ...string) (string, error) + + // CreateWithConfig is used to provide a mechanism where cluster providers that take an input config + // file and then setup the cluster accordingly. This can be used to provide input such as kind config + CreateWithConfig(ctx context.Context, configFile string) (string, error) + + // GetKubeconfig provides a way to extract the kubeconfig file associated with the cluster in question + // using the cluster provider native way + GetKubeconfig() string + + // GetKubectlContext is used to extract the kubectl context to be used while performing the operation + GetKubectlContext() string + + // ExportLogs is used to export the cluster logs via the cluster provider native workflow. This + // can be used to export logs from the cluster after test failures for example to analyze the test + // failures better after the fact. + ExportLogs(ctx context.Context, dest string) error + + // Destroy is used to cleanup a cluster brought up as part of the test workflow + Destroy(ctx context.Context) error + + // SetDefaults is a handler function invoked after creating an object of type E2EClusterProvider. This method is + // invoked as the first step after creating an object in order to make sure the default values for required + // attributes are setup accordingly if any. 
+ SetDefaults() E2EClusterProvider + + // WaitForControlPlane is a helper function that can be used to indiate the Provider based cluster create workflow + // that the control plane is fully up and running. This method is invoked after the Create/CreateWithConfig handlers + // and is expected to return an error if the control plane doesn't stabilize. If the provider being implemented + // does not have a clear mechanism to identify the Control plane readiness or is not required to wait for the control + // plane to be ready, such providers can simply add a no-op workflow for this function call. + // Returning an error message from this handler will stop the workflow of e2e-framework as returning an error from this + // is considered as failure to provision a cluster + WaitForControlPlane(ctx context.Context, client klient.Client) error + + // KubernetesRestConfig is a helper function that provides an instance of rest.Config which can then be used to + // create your own clients if you chose to do so. + KubernetesRestConfig() *rest.Config +} + +type E2EClusterProviderWithImageLoader interface { + E2EClusterProvider + + // LoadImage is used to load a set of Docker images to the cluster via the cluster provider native workflow + // Not every provider will have a mechanism like this/need to do this. So, providers that do not have this support + // can just provide a no-op implementation to be compliant with the interface + LoadImage(ctx context.Context, image string) error + + // LoadImageArchive is used to provide a mechanism where a tar.gz archive containing the docker images used + // by the services running on the cluster can be imported and loaded into the cluster prior to the execution of + // test if required. + // Not every provider will have a mechanism like this/need to do this. So, providers that do not have this support + // can just provide a no-op implementation to be compliant with the interface + LoadImageArchive(ctx context.Context, archivePath string) error +} diff --git a/vendor/sigs.k8s.io/e2e-framework/support/utils/command.go b/vendor/sigs.k8s.io/e2e-framework/support/utils/command.go new file mode 100644 index 000000000..860140c36 --- /dev/null +++ b/vendor/sigs.k8s.io/e2e-framework/support/utils/command.go @@ -0,0 +1,84 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + + "github.com/vladimirvivien/gexe" + "github.com/vladimirvivien/gexe/exec" + log "k8s.io/klog/v2" +) + +var commandRunner = gexe.New() + +// FindOrInstallGoBasedProvider check if the provider specified by the pPath executable exists or not. +// If it exists, it returns the path with no error and if not, it uses the `go install` capabilities to +// install the provider and setup the required binaries to perform the tests. 
In case if the install +// is done by this helper, it will return the value for installed binary as provider which can then +// be set in the in the invoker to make sure the right path is used for the binaries while invoking +// rest of the workfow after this helper is triggered. +func FindOrInstallGoBasedProvider(pPath, provider, module, version string) (string, error) { + if commandRunner.Prog().Avail(pPath) != "" { + log.V(4).InfoS("Found Provider tooling already installed on the machine", "command", pPath) + return pPath, nil + } + + installCommand := fmt.Sprintf("go install %s@%s", module, version) + log.V(4).InfoS("Installing provider tooling using go install", "command", installCommand) + p := commandRunner.RunProc(installCommand) + if p.Err() != nil { + return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err()) + } + + if !p.IsSuccess() || p.ExitCode() != 0 { + return "", fmt.Errorf("failed to install %s: %s", pPath, p.Result()) + } + + if providerPath := commandRunner.Prog().Avail(provider); providerPath != "" { + log.V(4).Infof("Installed %s at", pPath, providerPath) + return provider, nil + } + + p = commandRunner.RunProc("ls $GOPATH/bin") + if p.Err() != nil { + return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err()) + } + + p = commandRunner.RunProc("echo $PATH:$GOPATH/bin") + if p.Err() != nil { + return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err()) + } + + log.V(4).Info(`Setting path to include $GOPATH/bin:`, p.Result()) + commandRunner.SetEnv("PATH", p.Result()) + + if providerPath := commandRunner.Prog().Avail(provider); providerPath != "" { + log.V(4).Infof("Installed %s at", pPath, providerPath) + return provider, nil + } + + return "", fmt.Errorf("%s not available even after installation", provider) +} + +func RunCommand(command string) *exec.Proc { + return commandRunner.RunProc(command) +} + +func FetchCommandOutput(command string) string { + return commandRunner.Run(command) +} diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index 3a8b64547..6a13cf2df 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -75,6 +75,8 @@ import ( // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. @@ -85,15 +87,14 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// -func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error { +func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. 
@@ -167,16 +168,16 @@ func (e *InvalidUnmarshalError) Error() string { return "json: Unmarshal(nil)" } - if e.Type.Kind() != reflect.Ptr { + if e.Type.Kind() != reflect.Pointer { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" } */ -func (d *decodeState) unmarshal(v interface{}) error { +func (d *decodeState) unmarshal(v any) error { rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { + if rv.Kind() != reflect.Pointer || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } @@ -233,7 +234,7 @@ type decodeState struct { disallowUnknownFields bool savedStrictErrors []error - seenStrictErrors map[string]struct{} + seenStrictErrors map[strictError]struct{} strictFieldStack []string caseSensitive bool @@ -425,7 +426,7 @@ type unquotedValue struct{} // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { +func (d *decodeState) valueQuoted() any { switch d.opcode { default: panic(phasePanicMsg) @@ -467,7 +468,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { haveAddr = true v = v.Addr() } @@ -476,14 +477,14 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // usefully addressable. if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { haveAddr = false v = e continue } } - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { break } @@ -678,7 +679,7 @@ func (d *decodeState) object(v reflect.Value) error { reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: - if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil @@ -695,7 +696,7 @@ func (d *decodeState) object(v reflect.Value) error { seenKeys = map[string]struct{}{} } if _, seen := seenKeys[fieldName]; seen { - d.saveStrictError(d.newFieldError("duplicate field", fieldName)) + d.saveStrictError(d.newFieldError(duplicateStrictErrType, fieldName)) } else { seenKeys[fieldName] = struct{}{} } @@ -711,7 +712,7 @@ func (d *decodeState) object(v reflect.Value) error { var seenKeys uint64 checkDuplicateField = func(fieldNameIndex int, fieldName string) { if seenKeys&(1< startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. 
- ptr := v.Pointer() + ptr := v.UnsafePointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr uintptr + ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe len int - }{v.Pointer(), v.Len()} + }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -893,7 +892,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { func newSliceEncoder(t reflect.Type) encoderFunc { // Byte slices get special treatment; arrays don't. if t.Elem().Kind() == reflect.Uint8 { - p := reflect.PtrTo(t.Elem()) + p := reflect.PointerTo(t.Elem()) if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) { return encodeByteSlice } @@ -989,7 +988,7 @@ func isValidTag(s string) bool { func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } t = t.Field(i).Type @@ -1009,7 +1008,7 @@ func (w *reflectWithString) resolve() error { return nil } if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { - if w.k.Kind() == reflect.Ptr && w.k.IsNil() { + if w.k.Kind() == reflect.Pointer && w.k.IsNil() { return nil } buf, err := tm.MarshalText() @@ -1243,7 +1242,7 @@ func typeFields(t reflect.Type) structFields { sf := f.typ.Field(i) if sf.Anonymous { t := sf.Type - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } if !sf.IsExported() && t.Kind() != reflect.Struct { @@ -1269,7 +1268,7 @@ func typeFields(t reflect.Type) structFields { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. ft = ft.Elem() } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go index 9e170127d..ab249b2bb 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go @@ -24,8 +24,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go index d3fa2d111..b8f4ff2c1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gofuzz -// +build gofuzz package json @@ -12,10 +11,10 @@ import ( ) func Fuzz(data []byte) (score int) { - for _, ctor := range []func() interface{}{ - func() interface{} { return new(interface{}) }, - func() interface{} { return new(map[string]interface{}) }, - func() interface{} { return new([]interface{}) }, + for _, ctor := range []func() any{ + func() any { return new(any) }, + func() any { return new(map[string]any) }, + func() any { return new([]any) }, } { v := ctor() err := Unmarshal(data, v) diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go index cb9ab0627..e1c0a74d9 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go @@ -18,7 +18,6 @@ package json import ( gojson "encoding/json" - "fmt" "strconv" "strings" ) @@ -71,32 +70,37 @@ func (d *Decoder) DisallowDuplicateFields() { d.d.disallowDuplicateFields = true } -func (d *decodeState) newFieldError(msg, field string) error { +func (d *decodeState) newFieldError(errType strictErrType, field string) *strictError { if len(d.strictFieldStack) > 0 { - return fmt.Errorf("%s %q", msg, strings.Join(d.strictFieldStack, "")+"."+field) + return &strictError{ + ErrType: errType, + Path: strings.Join(d.strictFieldStack, "") + "." + field, + } } else { - return fmt.Errorf("%s %q", msg, field) + return &strictError{ + ErrType: errType, + Path: field, + } } } // saveStrictError saves a strict decoding error, // for reporting at the end of the unmarshal if no other errors occurred. -func (d *decodeState) saveStrictError(err error) { +func (d *decodeState) saveStrictError(err *strictError) { // prevent excessive numbers of accumulated errors if len(d.savedStrictErrors) >= 100 { return } // dedupe accumulated strict errors if d.seenStrictErrors == nil { - d.seenStrictErrors = map[string]struct{}{} + d.seenStrictErrors = map[strictError]struct{}{} } - msg := err.Error() - if _, seen := d.seenStrictErrors[msg]; seen { + if _, seen := d.seenStrictErrors[*err]; seen { return } // accumulate the error - d.seenStrictErrors[msg] = struct{}{} + d.seenStrictErrors[*err] = struct{}{} d.savedStrictErrors = append(d.savedStrictErrors, err) } @@ -118,6 +122,33 @@ func (d *decodeState) appendStrictFieldStackIndex(i int) { d.strictFieldStack = append(d.strictFieldStack, "[", strconv.Itoa(i), "]") } +type strictErrType string + +const ( + unknownStrictErrType strictErrType = "unknown field" + duplicateStrictErrType strictErrType = "duplicate field" +) + +// strictError is a strict decoding error +// It has an ErrType (either unknown or duplicate) +// and a path to the erroneous field +type strictError struct { + ErrType strictErrType + Path string +} + +func (e *strictError) Error() string { + return string(e.ErrType) + " " + strconv.Quote(e.Path) +} + +func (e *strictError) FieldPath() string { + return e.Path +} + +func (e *strictError) SetFieldPath(path string) { + e.Path = path +} + // UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives. // If this is returned from Unmarshal(), it means the decoding was successful in all other respects. 
type UnmarshalStrictError struct { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go index 9dc1903e2..22fc6922d 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go @@ -27,6 +27,7 @@ func Valid(data []byte) bool { // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. +// checkValid returns nil or a SyntaxError. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { @@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error { } // A SyntaxError is a description of a JSON syntax error. +// Unmarshal will return a SyntaxError if the JSON can't be parsed. type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes @@ -83,7 +85,7 @@ type scanner struct { } var scannerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &scanner{} }, } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 5f87df1c6..1967755ac 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -45,7 +45,7 @@ func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true // // See the documentation for Unmarshal for details about // the conversion of JSON into a Go value. -func (dec *Decoder) Decode(v interface{}) error { +func (dec *Decoder) Decode(v any) error { if dec.err != nil { return dec.err } @@ -197,7 +197,7 @@ func NewEncoder(w io.Writer) *Encoder { // // See the documentation for Marshal for details about the // conversion of Go values to JSON. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { if enc.err != nil { return enc.err } @@ -289,8 +289,7 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// -type Token interface{} +type Token any */ const ( @@ -457,7 +456,7 @@ func (dec *Decoder) Token() (Token, error) { if !dec.tokenValueAllowed() { return dec.tokenError(c) } - var x interface{} + var x any if err := dec.Decode(&x); err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go index c38fd5102..b490328f4 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go @@ -15,10 +15,8 @@ type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. 
func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") + tag, opt, _ := strings.Cut(tag, ",") + return tag, tagOptions(opt) } // Contains reports whether a comma-separated list of options @@ -30,15 +28,11 @@ func (o tagOptions) Contains(optionName string) bool { } s := string(o) for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { + var name string + name, s, _ = strings.Cut(s, ",") + if name == optionName { return true } - s = next } return false } diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go index 764e2a84c..e8f31b16c 100644 --- a/vendor/sigs.k8s.io/json/json.go +++ b/vendor/sigs.k8s.io/json/json.go @@ -34,13 +34,13 @@ type Decoder interface { } // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. +// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { d := internaljson.NewDecoder(r) d.CaseSensitive() @@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v. // // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. 
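The tags.go hunk above swaps manual strings.Index slicing for strings.Cut (Go 1.18+), which returns the text before and after the first separator plus a found flag. A small equivalence check with hypothetical tag values:

package main

import (
	"fmt"
	"strings"
)

// oldSplit is the pre-refactor way of splitting a `json:"name,opt1,opt2"` tag.
func oldSplit(tag string) (string, string) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tag[idx+1:]
	}
	return tag, ""
}

// newSplit expresses the same split with strings.Cut.
func newSplit(tag string) (string, string) {
	name, opts, _ := strings.Cut(tag, ",")
	return name, opts
}

func main() {
	for _, tag := range []string{"name,omitempty,string", "name", ""} {
		on, oo := oldSplit(tag)
		nn, no := newSplit(tag)
		fmt.Printf("%q: old=(%q,%q) new=(%q,%q)\n", tag, on, oo, nn, no)
	}
}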
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error { return internaljson.Unmarshal( data, @@ -84,6 +84,8 @@ const ( // and a list of the strict failures (if any) are returned. If no `strictOptions` are selected, // all supported strict checks are performed. // +// Strict errors returned will implement the FieldError interface for the specific erroneous fields. +// // Currently supported strict checks are: // - DisallowDuplicateFields: ensure the data contains no duplicate fields // - DisallowUnknownFields: ensure the data contains no unknown fields (when decoding into typed structs) @@ -137,3 +139,12 @@ func SyntaxErrorOffset(err error) (isSyntaxError bool, offset int64) { return false, 0 } } + +// FieldError is an error that provides access to the path of the erroneous field +type FieldError interface { + error + // FieldPath provides the full path of the erroneous field within the json object. + FieldPath() string + // SetFieldPath updates the path of the erroneous field output in the error message. + SetFieldPath(path string) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 000000000..75a492d8e --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. 
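The FieldError interface added to sigs.k8s.io/json (json.go hunk above) lets callers read and rewrite the path carried by a strict decoding error. A hedged sketch of one possible use, prefixing strict errors with a parent path; the interface is copied from the hunk, while demoErr and prefixStrictErrors are purely illustrative:

package main

import (
	"errors"
	"fmt"
)

// FieldError mirrors the interface exported by sigs.k8s.io/json above.
type FieldError interface {
	error
	FieldPath() string
	SetFieldPath(path string)
}

// demoErr stands in for the library's internal strict error type.
type demoErr struct{ path string }

func (e *demoErr) Error() string            { return "unknown field " + e.path }
func (e *demoErr) FieldPath() string        { return e.path }
func (e *demoErr) SetFieldPath(path string) { e.path = path }

// prefixStrictErrors rewrites the field path of every FieldError in errs,
// e.g. when the decoded fragment is known to live under a parent field.
func prefixStrictErrors(prefix string, errs []error) {
	for _, err := range errs {
		var fe FieldError
		if errors.As(err, &fe) {
			fe.SetFieldPath(prefix + fe.FieldPath())
		}
	}
}

func main() {
	errs := []error{&demoErr{path: "spec.foo"}}
	prefixStrictErrors("template.", errs)
	fmt.Println(errs[0]) // unknown field template.spec.foo
}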
+func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. +func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p, + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 000000000..1b23dcbd5 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,356 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + enableUnions bool +} + +// EnableUnionFeature turns on union handling. It is disabled by default until the +// feature is complete. 
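Conflicts groups its entries by manager (sorted) when rendered as an error, and ToSet flattens all conflicting paths into a single fieldpath.Set. A short sketch; fieldpath.MakePathOrDie is assumed to be the available path constructor:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

func main() {
	conflicts := merge.Conflicts{
		{Manager: "kubectl", Path: fieldpath.MakePathOrDie("spec", "replicas")},
		{Manager: "kubectl", Path: fieldpath.MakePathOrDie("metadata", "labels", "app")},
		{Manager: "controller", Path: fieldpath.MakePathOrDie("spec", "paused")},
	}

	// Prints roughly:
	//   conflicts with "controller":
	//   - .spec.paused
	//   conflicts with "kubectl":
	//   - .spec.replicas
	//   - .metadata.labels.app
	fmt.Println(conflicts.Error())

	fmt.Println(conflicts.ToSet().Empty()) // false: all three paths are in the set
}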
+func (s *Updater) EnableUnionFeature() { + s.enableUnions = true +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + newObject, err = liveObject.NormalizeUnions(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. If the object hasn't changed, nil is returned (the +// managers can still have changed though). +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + configObject, err = configObject.NormalizeUnionsApply(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + if s.enableUnions { + newObject, err = configObject.NormalizeUnionsApply(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
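Updater.Apply returns a merge.Conflicts error when force is false and other managers own contested fields, and returns a nil object when the apply changes nothing. A minimal usage sketch under stated assumptions: identityConverter is an illustrative single-version implementation of the Converter interface defined above, and typed.DeducedParseableType with FromYAML is assumed to be available for schema-less objects:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// identityConverter satisfies merge.Converter for a type with a single version.
type identityConverter struct{}

func (identityConverter) Convert(v *typed.TypedValue, _ fieldpath.APIVersion) (*typed.TypedValue, error) {
	return v, nil
}
func (identityConverter) IsMissingVersionError(error) bool { return false }

func main() {
	live, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 3}}`)
	if err != nil {
		panic(err)
	}
	config, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 5}}`)
	if err != nil {
		panic(err)
	}

	updater := &merge.Updater{Converter: identityConverter{}}
	managed := fieldpath.ManagedFields{} // normally decoded from managedFields metadata

	newObj, managed, err := updater.Apply(live, config, "v1", managed, "example-applier", false)
	if conflicts, ok := err.(merge.Conflicts); ok {
		// Another manager owns one of the applied fields; each entry names it.
		for _, c := range conflicts {
			fmt.Printf("cannot apply %v: owned by %q\n", c.Path, c.Manager)
		}
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(newObj != nil, len(managed)) // true 1: the object changed, one manager entry
}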
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if compare.IsSame() { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() + pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + // Add back owned items at pruned version first to avoid conversion failure + // caused by pruned fields which are required for conversion. + prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) + if managed, ok := managedAtVersion[prunedVersion]; ok { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) + if err != nil { + return nil, err + } + delete(managedAtVersion, prunedVersion) + } + for version, managed := range managedAtVersion { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, version, managed) + if err != nil { + return nil, err + } + } + return pruned, nil +} + +// addBackOwnedItemsForVersion adds back any fields, list and map items that were removed by prune with specific managed field path at a version. +// It is an extracted sub-function from addBackOwnedItems for code reuse. 
+func (s *Updater) addBackOwnedItemsForVersion(merged, pruned *typed.TypedValue, version fieldpath.APIVersion, managed *fieldpath.Set) (*typed.TypedValue, *typed.TypedValue, error) { + var err error + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + sc, tr := merged.Schema(), merged.TypeRef() + pruned = merged.RemoveItems(mergedSet.EnsureNamedFieldsAreMembers(sc, tr).Difference(prunedSet.EnsureNamedFieldsAreMembers(sc, tr).Union(managed.EnsureNamedFieldsAreMembers(sc, tr)))) + return merged, pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + sc, tr := merged.Schema(), merged.TypeRef() + prunedSet = prunedSet.EnsureNamedFieldsAreMembers(sc, tr) + mergedSet = mergedSet.EnsureNamedFieldsAreMembers(sc, tr) + last := lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr) + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(last)), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. 
+// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 01103b38a..7e5dc7582 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -16,7 +16,9 @@ limitations under the License. package schema -import "sync" +import ( + "sync" +) // Schema is a list of named types. // @@ -27,6 +29,11 @@ type Schema struct { once sync.Once m map[string]TypeDef + + lock sync.Mutex + // Cached results of resolving type references to atoms. Only stores + // type references which require fields of Atom to be overriden. + resolvedTypes map[TypeRef]Atom } // A TypeSpecifier references a particular type in a schema. @@ -48,6 +55,12 @@ type TypeRef struct { // Either the name or one member of Atom should be set. NamedType *string `yaml:"namedType,omitempty"` Inlined Atom `yaml:",inline,omitempty"` + + // If this reference refers to a map-type or list-type, this field overrides + // the `ElementRelationship` of the referred type when resolved. + // If this field is nil, then it has no effect. + // See `Map` and `List` for more information about `ElementRelationship` + ElementRelationship *ElementRelationship `yaml:"elementRelationship,omitempty"` } // Atom represents the smallest possible pieces of the type system. @@ -88,11 +101,11 @@ const ( // Map is a key-value pair. Its default semantics are the same as an // associative list, but: -// * It is serialized differently: +// - It is serialized differently: // map: {"k": {"value": "v"}} // list: [{"key": "k", "value": "v"}] -// * Keys must be string typed. -// * Keys can't have multiple components. +// - Keys must be string typed. +// - Keys can't have multiple components. // // Optionally, maps may be atomic (for example, imagine representing an RGB // color value--it doesn't make sense to have different actors own the R and G @@ -146,6 +159,31 @@ func (m *Map) FindField(name string) (StructField, bool) { return sf, ok } +// CopyInto this instance of Map into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (m *Map) CopyInto(dst *Map) { + if dst == nil { + return + } + + // Map type is considered immutable so sharing references + dst.Fields = m.Fields + dst.ElementType = m.ElementType + dst.Unions = m.Unions + dst.ElementRelationship = m.ElementRelationship + + if m.m != nil { + // If cache is non-nil then the once token had been consumed. 
+ // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = m.m + }) + } +} + // UnionFields are mapping between the fields that are part of the union and // their discriminated value. The discriminated value has to be set, and // should not conflict with other discriminated value in the list. @@ -244,18 +282,93 @@ func (s *Schema) FindNamedType(name string) (TypeDef, bool) { return t, ok } +func (s *Schema) resolveNoOverrides(tr TypeRef) (Atom, bool) { + result := Atom{} + + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + + result = t.Atom + } else { + result = tr.Inlined + } + + return result, true +} + // Resolve is a convenience function which returns the atom referenced, whether // it is inline or named. Returns (Atom{}, false) if the type can't be resolved. // // This allows callers to not care about the difference between a (possibly // inlined) reference and a definition. func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { - if tr.NamedType != nil { - t, ok := s.FindNamedType(*tr.NamedType) - if !ok { + // If this is a plain reference with no overrides, just return the type + if tr.ElementRelationship == nil { + return s.resolveNoOverrides(tr) + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.resolvedTypes == nil { + s.resolvedTypes = make(map[TypeRef]Atom) + } + + var result Atom + var exists bool + + // Return cached result if available + // If not, calculate result and cache it + if result, exists = s.resolvedTypes[tr]; !exists { + if result, exists = s.resolveNoOverrides(tr); exists { + // Allow field-level electives to override the referred type's modifiers + switch { + case result.Map != nil: + mapCopy := Map{} + result.Map.CopyInto(&mapCopy) + mapCopy.ElementRelationship = *tr.ElementRelationship + result.Map = &mapCopy + case result.List != nil: + listCopy := *result.List + listCopy.ElementRelationship = *tr.ElementRelationship + result.List = &listCopy + case result.Scalar != nil: + return Atom{}, false + default: + return Atom{}, false + } + } else { return Atom{}, false } - return t.Atom, true + + // Save result. If it is nil, that is also recorded as not existing. + s.resolvedTypes[tr] = result + } + + return result, true +} + +// Clones this instance of Schema into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (s *Schema) CopyInto(dst *Schema) { + if dst == nil { + return + } + + // Schema type is considered immutable so sharing references + dst.Types = s.Types + + if s.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. 
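The new TypeRef.ElementRelationship field lets a reference override the element relationship of the map or list type it points to; Resolve copies the referred atom, applies the override, and caches the result keyed by the TypeRef. A sketch assuming the schema package's String scalar and Atomic constants:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/schema"
)

func main() {
	str := schema.String // scalar element type for the demo map
	name := "demoMap"

	// A named map type declared with the default (granular) relationship.
	s := &schema.Schema{
		Types: []schema.TypeDef{{
			Name: name,
			Atom: schema.Atom{Map: &schema.Map{
				ElementType: schema.TypeRef{Inlined: schema.Atom{Scalar: &str}},
			}},
		}},
	}

	atomic := schema.Atomic

	// A plain reference resolves to the type exactly as declared...
	plain, _ := s.Resolve(schema.TypeRef{NamedType: &name})
	// ...while a reference carrying an override gets a copy whose
	// element relationship is forced to atomic (and is cached for reuse).
	overridden, _ := s.Resolve(schema.TypeRef{NamedType: &name, ElementRelationship: &atomic})

	fmt.Printf("declared: %q, overridden: %q\n",
		plain.Map.ElementRelationship, overridden.Map.ElementRelationship)
}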
+ dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = s.m + }) } - return tr.Inlined, true } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go index 4c303eecc..b668eff83 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go @@ -52,6 +52,9 @@ func (a *TypeRef) Equals(b *TypeRef) bool { } //return true } + if a.ElementRelationship != b.ElementRelationship { + return false + } return a.Inlined.Equals(&b.Inlined) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index bb60e2a5f..7d64d1308 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -66,6 +66,9 @@ var SchemaSchemaYAML = `types: - name: untyped type: namedType: untyped + - name: elementRelationship + type: + scalar: string - name: scalar scalar: string - name: map diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 6b2b2cb4a..19c77334f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -105,7 +105,11 @@ type atomHandler interface { func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors { a, ok := s.Resolve(tr) if !ok { - return errorf("schema error: no type found matching: %v", *tr.NamedType) + typeName := "inlined type" + if tr.NamedType != nil { + typeName = *tr.NamedType + } + return errorf("schema error: no type found matching: %v", typeName) } a = deduceAtom(a, v) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 75244ef64..913644083 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -80,7 +80,12 @@ func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) { alhs := deduceAtom(a, w.lhs) arhs := deduceAtom(a, w.rhs) - if alhs.Equals(&arhs) { + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { errs = append(errs, handleAtom(arhs, w.typeRef, w)...) } else { w2 := *w diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go index 2b98b729c..6a7697e3b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -110,7 +110,7 @@ func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) } // ReconcileFieldSetWithSchema reconciles the a field set with any changes to the -//// object's schema since the field set was written. Returns the reconciled field set, or nil of +// object's schema since the field set was written. Returns the reconciled field set, or nil of // no changes were made to the field set. 
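Because schemaschema.go now declares an elementRelationship field on type references, the override can also be expressed directly in schema YAML. A hedged sketch; typed.NewParser, Parser.Type, and the exact YAML layout are assumptions based on the hunks above:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: config
  map:
    fields:
    - name: labels
      type:
        namedType: stringMap
        elementRelationship: atomic
- name: stringMap
  map:
    elementType:
      scalar: string
`

func main() {
	parser, err := typed.NewParser(schemaYAML)
	if err != nil {
		panic(err)
	}
	obj, err := parser.Type("config").FromYAML(`{"labels": {"a": "b"}}`)
	if err != nil {
		panic(err)
	}
	set, err := obj.ToFieldSet()
	if err != nil {
		panic(err)
	}
	// With the atomic override, "labels" is owned as a whole;
	// the set should contain .labels but not .labels.a.
	set.Iterate(func(p fieldpath.Path) { fmt.Println(p.String()) })
}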
// // Supports: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index e9e6be8be..d63a97fe2 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -99,12 +99,13 @@ func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { // Merge returns the result of merging tv and pso ("partially specified // object") together. Of note: -// * No fields can be removed by this operation. -// * If both tv and pso specify a given leaf field, the result will keep pso's -// value. -// * Container typed elements will have their items ordered: -// * like tv, if pso doesn't change anything in the container -// * like pso, if pso does change something in the container. +// - No fields can be removed by this operation. +// - If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// - Container typed elements will have their items ordered: +// 1. like tv, if pso doesn't change anything in the container +// 2. like pso, if pso does change something in the container. +// // tv and pso must both be of the same type (their Schema and TypeRef must // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema.
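The reflowed Merge documentation describes the usual server-side-apply semantics: the partially specified object's leaves win and no fields are removed. A short sketch; typed.DeducedParseableType, AsValue, and Unstructured are assumed to be available in this version of the packages:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

func main() {
	tv, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 3, "paused": false}}`)
	if err != nil {
		panic(err)
	}
	pso, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 5}}`)
	if err != nil {
		panic(err)
	}

	merged, err := tv.Merge(pso)
	if err != nil {
		panic(err)
	}

	// pso's leaf wins and tv's untouched field is kept, roughly:
	//   map[spec:map[paused:false replicas:5]]
	fmt.Println(merged.AsValue().Unstructured())
}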